[arvados] created: 2.1.0-2933-g02f2c38ee

git repository hosting git at public.arvados.org
Thu Sep 29 21:10:53 UTC 2022


        at  02f2c38ee8f2a5d2e33a7bd2ff033303e6861aaf (commit)


commit 02f2c38ee8f2a5d2e33a7bd2ff033303e6861aaf
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Thu Sep 29 17:05:49 2022 -0400

    19215: Much installer documentation and reorganization
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/_includes/_install_ca_cert.liquid b/doc/_includes/_install_ca_cert.liquid
index 522a63a03..279356a34 100644
--- a/doc/_includes/_install_ca_cert.liquid
+++ b/doc/_includes/_install_ca_cert.liquid
@@ -4,14 +4,6 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-h2(#ca_root_certificate). Install the CA root certificate (SSL_MODE=self-signed only)
-
-*If you are not using self-signed certificates (you selected SSL_MODE=lets-encrypt or SSL_MODE=bring-your-own), skip this section.*
-
-Arvados uses SSL to encrypt communications. The web interface uses AJAX which will silently fail if the certificate is not valid or signed by an unknown Certification Authority.
-
-For this reason, the @arvados-formula@ has a helper state to create a root certificate to authorize Arvados services. The @provision.sh@ script will leave a copy of the generated CA's certificate (@arvados-snakeoil-ca.pem@) in the script's directory so you can add it to your workstation.
-
 h3. Web Browser
 
 Installing the root certificate into your web browser will prevent security errors when accessing Arvados services with your web browser.
@@ -21,7 +13,7 @@ h4. Chrome
 # Go to "Settings → Privacy and Security → Security → Manage Certificates" or enter @chrome://settings/certificates@ in the URL bar.
 # *Click on the "Authorities" tab*  (it is not selected by default)
 # Click on the "Import" button
-# Choose @arvados-snakeoil-ca.pem@
+# Choose @{{ca_cert_name}}@
 # Tick the checkbox next to "Trust this certificate for identifying websites"
 # Hit OK
 # The certificate should appear in the list of Authorities under "Arvados"
@@ -33,14 +25,14 @@ h4. Firefox
 # Click on the button "View Certificates...".
 # Make sure the "Authorities" tab is selected
 # Press the "Import..." button.
-# Choose @arvados-snakeoil-ca.pem@
+# Choose @{{ca_cert_name}}@
 # Tick the checkbox next to "Trust this CA to identify websites"
 # Hit OK
 # The certificate should appear in the list of Authorities under "Arvados"
 
 h4. Other browsers (Safari, etc)
 
-The process will be similar to that of Chrome and Firefox, but the exact user interface will be different.  If you can't figure it out, try searching for "how do I install a custom certificate authority in <my browser>".
+The process will be similar to that of Chrome and Firefox, but the exact user interface will be different.  If you can't figure it out, try searching for "how do I install a custom certificate authority in (my browser)".
 
 h3. Installation on Linux OS certificate storage
 
@@ -51,7 +43,7 @@ h4. Debian/Ubuntu
 *Important:* the certificate file added to @ca-certificates@ must have the extension @.crt@ or it won't be recognized.
 
 <notextile>
-<pre><code>cp arvados-snakeoil-ca.pem /usr/local/share/ca-certificates/arvados-snakeoil-ca.crt
+<pre><code>cp {{ca_cert_name}} /usr/local/share/ca-certificates/arvados-snakeoil-ca.crt
 /usr/sbin/update-ca-certificates
 </code></pre>
 </notextile>
@@ -59,7 +51,7 @@ h4. Debian/Ubuntu
 h4. CentOS
 
 <notextile>
-<pre><code>cp arvados-snakeoil-ca.pem /etc/pki/ca-trust/source/anchors/
+<pre><code>cp {{ca_cert_name}} /etc/pki/ca-trust/source/anchors/
 /usr/bin/update-ca-trust
 </code></pre>
 </notextile>
diff --git a/doc/_includes/_multi_host_install_custom_certificates.liquid b/doc/_includes/_multi_host_install_custom_certificates.liquid
index 6c0207324..7063eb28f 100644
--- a/doc/_includes/_multi_host_install_custom_certificates.liquid
+++ b/doc/_includes/_multi_host_install_custom_certificates.liquid
@@ -4,9 +4,9 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-You will need certificates for each DNS name and DNS wildcard previously described in the "Hosts":#hosts .
+You will need certificates for each DNS name and DNS wildcard previously listed in the "DNS hostnames for each service":#DNS .
 
-To simplify certificate management, we recommend creating a single certificate with all of the hostnames, or creating a wildcard certificate that covers all possible hostnames (with the following patterns in subjectAltName):
+To simplify certificate management, we recommend creating a single certificate for all of the hostnames, or creating a wildcard certificate that covers all possible hostnames (with the following patterns in subjectAltName):
 
 <pre>
 xarv1.example.com
@@ -21,15 +21,15 @@ Copy your certificates to the directory specified with the variable @CUSTOM_CERT
 The script expects cert/key files with these basenames (matching the role, except for <i>keepweb</i>, which is split into <i>download</i> and <i>collections</i>):
 
 # @controller@
-# @websocket@        # note: corresponds to default domain @ws.${CLUSTER}.${DOMAIN}@
-# @keepproxy@        # note: corresponds to default domain @keep.${CLUSTER}.${DOMAIN}@
-# @download@         # Part of keepweb
-# @collections@      # Part of keepweb -- important note, this should be a wildcard for @*.collections.${CLUSTER}.${DOMAIN}@
+# @websocket@        -- note: corresponds to default domain @ws.${CLUSTER}.${DOMAIN}@
+# @keepproxy@        -- note: corresponds to default domain @keep.${CLUSTER}.${DOMAIN}@
+# @download@         -- Part of keepweb
+# @collections@      -- Part of keepweb, must be a wildcard for @*.collections.${CLUSTER}.${DOMAIN}@
 # @workbench@
 # @workbench2@
 # @webshell@
 
-For example, for the 'keepproxy' service the script will expect to find this certificate:
+For example, for the @keepproxy@ service the script will expect to find this certificate:
 
 <notextile>
 <pre><code>${CUSTOM_CERTS_DIR}/keepproxy.crt
diff --git a/doc/_includes/_ssl_config_multi.liquid b/doc/_includes/_ssl_config_multi.liquid
index d001a5f22..b4d6eff61 100644
--- a/doc/_includes/_ssl_config_multi.liquid
+++ b/doc/_includes/_ssl_config_multi.liquid
@@ -4,25 +4,13 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-h2(#certificates). Choose the SSL configuration (SSL_MODE)
+h2(#certificates). Choose the SSL/TLS configuration (SSL_MODE)
 
-Arvados requires an SSL certificate to work correctly. This installer supports these options:
+Arvados requires a valid TLS certificate to work correctly. This installer supports these options:
 
-# @self-signed@: "let the installer create self-signed certificates":#self-signed
 # @lets-encrypt@: "automatically obtain and install SSL certificates for your hostnames":#lets-encrypt
 # @bring-your-own@: "supply your own certificates in the @certs@ directory":#bring-your-own
 
-h3(#self-signed). Using self-signed certificates
-
-To make the installer use self-signed certificates, change the configuration like this:
-
-<notextile>
-<pre><code>SSL_MODE="self-signed"
-</code></pre>
-</notextile>
-
-Before connecting to the Arvados web interface for the first time, anyone accessing the instance will need to "install the self-signed root certificate in their browser.":#ca_root_certificate
-
 h3(#lets-encrypt). Using a Let's Encrypt certificate
 
 In the default configuration, this installer gets a valid certificate via Let's Encrypt. If you have the <b>CLUSTER.DOMAIN</b> domain in a route53 zone, you can set <b>USE_LETSENCRYPT_ROUTE53</b> to <b>YES</b> and supply appropriate credentials so that Let's Encrypt can use dns-01 validation to get the appropriate certificates.
diff --git a/doc/_includes/_supportedlinux.liquid b/doc/_includes/_supportedlinux.liquid
new file mode 100644
index 000000000..08a20750c
--- /dev/null
+++ b/doc/_includes/_supportedlinux.liquid
@@ -0,0 +1,15 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+table(table table-bordered table-condensed).
+|_. *Supported Linux Distributions*|
+|CentOS 7|
+|Debian 11 ("bullseye")|
+|Debian 10 ("buster")|
+|Ubuntu 20.04 ("focal")|
+|Ubuntu 18.04 ("bionic")|
+
+Arvados packages are published for current Debian releases (until the EOL date), current Ubuntu LTS releases (until the end of standard support), and the latest version of CentOS.
diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index 7ee8fbb08..cfdae50eb 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -45,15 +45,9 @@ h2(#v2_4_3). v2.4.3 (2022-09-21)
 
 h3. Fixed PAM authentication security vulnerability
 
-In Arvados 2.4.2 and earlier, when using PAM authentication, if a user
-presented valid credentials but the account is disabled or otherwise
-not allowed to access the host, it would still be accepted for access
-to Arvados.  From 2.4.3 onwards, Arvados now also checks that the
-account is permitted to access the host before completing the PAM login
-process.
-
-Other authentication methods (LDAP, OpenID Connect) are not affected
-by this flaw.
+In Arvados 2.4.2 and earlier, when using PAM authentication, if a user presented valid credentials but the account is disabled or otherwise not allowed to access the host, it would still be accepted for access to Arvados.  From 2.4.3 onwards, Arvados now also checks that the account is permitted to access the host before completing the PAM login process.
+
+Other authentication methods (LDAP, OpenID Connect) are not affected by this flaw.
 
 h2(#v2_4_2). v2.4.2 (2022-08-09)
 
diff --git a/doc/install/arvbox.html.textile.liquid b/doc/install/arvbox.html.textile.liquid
index 378fcc8f8..52dec9067 100644
--- a/doc/install/arvbox.html.textile.liquid
+++ b/doc/install/arvbox.html.textile.liquid
@@ -11,23 +11,50 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvbox is a Docker-based self-contained development, demonstration and testing environment for Arvados.  It is not intended for production use.
 
+h2. Requirements
+
+* Linux 3.x+ and Docker 1.10+
+* Minimum of 4 GiB of RAM  + additional memory to run jobs
+* Minimum of 4 GiB of disk + storage for actual data
+
 h2. Quick start
 
+{% include 'branchname' %}
+
+<notextile>
+<pre><code>$ <span class="userinput">curl -O <a href="https://git.arvados.org/arvados.git/blob_plain/refs/heads/{{branchname}}:/tools/arvbox/bin/arvbox">https://git.arvados.org/arvados.git/blob_plain/refs/heads/{{branchname}}:/tools/arvbox/bin/arvbox</a></span>
+$ <span class="userinput">chmod +x arvbox</span>
+$ <span class="userinput">./arvbox start localdemo</span>
+
+Arvados-in-a-box starting
+
+Waiting for workbench2 websockets workbench webshell keep-web controller keepproxy api keepstore1 arv-git-httpd keepstore0 sdk vm ...
+...
+
+Your Arvados-in-a-box is ready!
+
+$ <span class="userinput">./arvbox adduser demouser demo@example.com</span>
+Password for demouser:
+Added demouser
+</code></pre>
+</notextile>
+
+You will then need to "install the arvbox root certificate":#root-cert .  After that, you can log in to Workbench as @demouser@ with the password you selected.
+
+h2(#root-cert). Install root certificate
+
+Arvbox creates a root certificate to authorize Arvbox services.  Installing this root certificate into your web browser will prevent security errors when accessing Arvbox services.  Every Arvbox instance generates a new root signing key.
+
+Export the root certificate with this command:
+
 <pre>
-$ curl -O https://git.arvados.org/arvados.git/blob_plain/refs/heads/{{ branchname }}:/tools/arvbox/bin/arvbox
-$ chmod +x arvbox
-$ ./arvbox start localdemo
 $ ./arvbox root-cert
-$ ./arvbox adduser demouser demo at example.com
+Certificate copied to /home/ubuntu/arvbox-root-cert.crt
 </pre>
 
-You will then need to "install the arvbox root certificate":#root-cert .  After that, you can now log in to Workbench as @demouser@ with the password you selected.
-
-h2. Requirements
+{% assign ca_cert_name = 'arvbox-root-cert.crt' %}
 
-* Linux 3.x+ and Docker 1.10+
-* Minimum of 3 GiB of RAM  + additional memory to run jobs
-* Minimum of 3 GiB of disk + storage for actual data
+{% include 'install_ca_cert' %}
 
 h2. Usage
 
@@ -62,43 +89,13 @@ pipe               run a bash script piped in from stdin
 sv <start|stop|restart> <service>
                    change state of service inside arvbox
 clone <from> <to>  clone dev arvbox
-adduser <username> <email>
+adduser <username> <email> [password]
                    add a user login
 removeuser <username>
                    remove user login
 listusers          list user logins
 </pre>
 
-h2(#root-cert). Install root certificate
-
-Arvbox creates root certificate to authorize Arvbox services.  Installing the root certificate into your web browser will prevent security errors when accessing Arvbox services with your web browser.  Every  Arvbox instance generates a new root signing key.
-
-# Export the certificate using @arvbox root-cert@
-# Go to the certificate manager in your browser.
-#* In Chrome, this can be found under "Settings → Advanced → Manage Certificates" or by entering @chrome://settings/certificates@ in the URL bar.
-#* In Firefox, this can be found under "Preferences → Privacy & Security" or entering @about:preferences#privacy@ in the URL bar and then choosing "View Certificates...".
-# Select the "Authorities" tab, then press the "Import" button.  Choose @arvbox-root-cert.pem@
-
-The certificate will be added under the "Arvados testing" organization as "arvbox testing root CA".
-
-To access your Arvbox instance using command line clients (such as arv-get and arv-put) without security errors, install the certificate into the OS certificate storage.
-
-h3. On Debian/Ubuntu:
-
-<notextile>
-<pre><code>cp arvbox-root-cert.pem /usr/local/share/ca-certificates/
-/usr/sbin/update-ca-certificates
-</code></pre>
-</notextile>
-
-h3. On CentOS:
-
-<notextile>
-<pre><code>cp arvbox-root-cert.pem /etc/pki/ca-trust/source/anchors/
-/usr/bin/update-ca-trust
-</code></pre>
-</notextile>
-
 h2. Configs
 
 h3. dev
diff --git a/doc/install/configure-s3-object-storage.html.textile.liquid b/doc/install/configure-s3-object-storage.html.textile.liquid
index e9866d510..6a10a3992 100644
--- a/doc/install/configure-s3-object-storage.html.textile.liquid
+++ b/doc/install/configure-s3-object-storage.html.textile.liquid
@@ -9,10 +9,15 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Keepstore can store data in object storage compatible with the S3 API, such as Amazon S3, Google Cloud Storage, or Ceph RADOS.
+Keepstore can store data in object storage compatible with the S3 API, such as Amazon S3, Google Cloud Storage, Ceph RADOS, NetApp StorageGRID, and others.
 
 Volumes are configured in the @Volumes@ section of the cluster configuration file.
 
+# "Configuration example":#example
+# "IAM Policy":#IAM
+
+h2(#example). Configuration example
+
 {% include 'assign_volume_uuid' %}
 
 <notextile><pre><code>    Volumes:
@@ -120,3 +125,24 @@ Two S3 drivers are available. Historically, Arvados has used the @goamz@ driver
 The @aws-sdk-go-v2@ does not support the old S3 v2 signing algorithm. This will not affect interacting with AWS S3, but it might be an issue when Keep is backed by a very old version of a third party S3-compatible service.
 
 The @aws-sdk-go-v2@ driver can improve read performance by 50-100% over the @goamz@ driver, but it has not had as much production use. See the "wiki":https://dev.arvados.org/projects/arvados/wiki/Keep_real_world_performance_numbers for details.
+
+h2(#IAM). IAM Policy
+
+On Amazon, VMs which will access the S3 bucket (these include keepstore and compute nodes) will need an IAM policy with "permission to read, write, list and delete objects in the bucket":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html .  Here is an example policy:
+
+<notextile>
+<pre>
+{
+    "Id": "arvados-keepstore policy",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                  "s3:*"
+            ],
+            "Resource": "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume"
+        }
+    ]
+}
+</pre>
+</notextile>
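+
+The example above grants @s3:*@ on the volume bucket.  A narrower sketch covering just read, write, list and delete (note that @s3:ListBucket@ applies to the bucket ARN itself, while the object actions apply to the objects under it) might look like this; verify it against your driver's actual needs:
+
+<notextile>
+<pre>
+{
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": ["s3:ListBucket"],
+            "Resource": "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume"
+        },
+        {
+            "Effect": "Allow",
+            "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
+            "Resource": "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume/*"
+        }
+    ]
+}
+</pre>
+</notextile>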
diff --git a/doc/install/index.html.textile.liquid b/doc/install/index.html.textile.liquid
index eebc0ab7d..b37ddc748 100644
--- a/doc/install/index.html.textile.liquid
+++ b/doc/install/index.html.textile.liquid
@@ -13,13 +13,14 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 This section is about installing an Arvados cluster.  If you are just looking to install Arvados client tools and libraries, "go to the SDK section.":{{site.baseurl}}/sdk/
 {% include 'notebox_end' %}
 
-Arvados components run on GNU/Linux systems, and supports AWS, GCP and Azure cloud platforms as well as on-premises installs.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.  "Arvados is Free Software":{{site.baseurl}}/user/copying/copying.html and self-install installations are not limited in any way.  Commercial support and development are also available from "Curii Corporation.":mailto:info at curii.com
+Arvados components run on supported GNU/Linux distributions. The Arvados elastic compute management layer supports Amazon Web Services (AWS) and Microsoft Azure cloud platforms as well as on-premises installs using SLURM and IBM Spectrum LSF.  The Arvados storage layer supports filesystem storage (including NFS, such as IBM GPFS), Azure blob storage, Amazon S3, and systems that offer an S3-compatible API such as Ceph Object Gateway and NetApp StorageGRID.
+
+"Arvados is Free Software":{{site.baseurl}}/user/copying/copying.html and self-install installations are not limited in any way.  Commercial support and development are also available from "Curii Corporation.":https://www.curii.com/
 
 Arvados components can be installed and configured in a number of different ways.
 
 <div class="offset1">
 table(table table-bordered table-condensed).
-|||\5=. Appropriate for|
 ||_. Setup difficulty|_. Arvados Evaluation|_. Workflow Development|_. Production at Scale|
 |"Arvados-in-a-box":arvbox.html (arvbox)|Easy|yes|limited|no|
 |"Arados Installer":salt-single-host.html (single host)|Easy|yes|limited|no|
@@ -28,4 +29,4 @@ table(table table-bordered table-condensed).
 |"Cluster Operation Subscription supported by Curii":https://curii.com|N/A ^1^|yes|yes|yes|
 </div>
 
-* ^1^ No user installation necessary, Curii run and managed
+^1^ No user installation necessary.  Curii engineers will install and configure Arvados in your own infrastructure.
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index fc432cb37..985161e48 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: installguide
-title: Multi host Arvados
+title: Multi-Host Arvados
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -17,17 +17,15 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 # "Edit local.params":#localparams
 # "Configure Keep storage":#keep
 # "Choose the SSL configuration":#certificates
-## "Using a self-signed certificates":#self-signed
 ## "Using a Let's Encrypt certificates":#lets-encrypt
 ## "Bring your own certificates":#bring-your-own
 # "Create a compute image":#create_a_compute_image
-# "Further customization of the installation":#further_customization
 # "Begin installation":#installation
+# "Further customization of the installation":#further_customization
 # "Confirm the cluster is working":#test-install
 ## "Debugging issues":#debugging
 ## "Iterating on config changes":#iterating
 ## "Common problems and solutions":#common-problems
-# "Install the CA root certificate":#ca_root_certificate
 # "Initial user and login":#initial_user
 # "After the installation":#post_install
 
@@ -57,26 +55,9 @@ When you do so, you need to configure a couple of additional things:
 
 h3(#keep-bucket). S3 Bucket (AWS specific)
 
-We recommend "creating an S3 bucket":https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html for data storage named @${CLUSTER}-nyw5e-000000000000000-volume@
-
-Then create an IAM role called @${CLUSTER}-keepstore-00-iam-role@ which has "permission to read and write the bucket":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html .  Here is an example policy:
+We recommend "creating an S3 bucket":https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html for data storage named @${CLUSTER}-nyw5e-000000000000000-volume@.  We recommend creating an IAM role called @${CLUSTER}-keepstore-00-iam-role@ with a "policy that allows reading, writing, listing and deleting objects in the bucket":configure-s3-object-storage.html#IAM .  With the example cluster id @xarv1@ the bucket would be called @xarv1-nyw5e-000000000000000-volume@ and the role would be called @xarv1-keepstore-00-iam-role@.
 
-<notextile>
-<pre>
-{
-    "Id": "arvados-keepstore policy",
-    "Statement": [
-        {
-            "Effect": "Allow",
-            "Action": [
-                  "s3:*"
-            ],
-            "Resource": "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume"
-        }
-    ]
-}
-</pre>
-</notextile>
+These names are recommended because they are the default names used in the configuration template.  If you use different names, you will need to edit the configuration template later.
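+
+For example, a hypothetical bucket creation with the AWS CLI, assuming cluster id @xarv1@ and region @us-east-1@:
+
+<notextile>
+<pre><code>aws s3 mb s3://xarv1-nyw5e-000000000000000-volume --region us-east-1
+</code></pre>
+</notextile>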
 
 h2(#hosts). Required hosts
 
@@ -84,7 +65,7 @@ You will need to allocate several hosts (physical or virtual machines) for the f
 
 {% include 'supportedlinux' %}
 
-Allocate the following hosts as appropriate for your site.  On AWS you may choose to do it manually with the AWS console, or using a DevOps tool such as CloudFormation or Terraform.
+Allocate the following hosts as appropriate for your site.  On AWS you may choose to do it manually with the AWS console, or using a DevOps tool such as CloudFormation or Terraform.  With the exception of "keep0" and "keep1", all of these hosts should have external (public) IP addresses if you intend for them to be accessible outside of the private network or VPC.
 
 The installer will set up the Arvados services on your machines.  Here is the default assignment of services to machines:
 
@@ -107,28 +88,35 @@ The installer will set up the Arvados services on your machines.  Here is the de
 # SHELL node  (optional)
 ## arvados shell       (recommended hostname @shell.${CLUSTER}.${DOMAIN}@)
 
-Additional prerequisites when preparing machines to run the installer:
-
-# root or passwordless sudo access
-# from the account where you are performing the install, passwordless @ssh@ to each machine (meaning, the client's public key added to @~/.ssh/authorized_keys@ on each node)
+h3(#DNS). DNS hostnames for each service
+
+You will need a DNS entry for each service (an illustrative zone file sketch follows this list).  In the default configuration these are:
+
+# @controller.${CLUSTER}.${DOMAIN}@
+# @ws.${CLUSTER}.${DOMAIN}@
+# @keep0.${CLUSTER}.${DOMAIN}@
+# @keep1.${CLUSTER}.${DOMAIN}@
+# @keep.${CLUSTER}.${DOMAIN}@
+# @download.${CLUSTER}.${DOMAIN}@
+# @*.collections.${CLUSTER}.${DOMAIN}@  -- important note, this must be a wildcard DNS record, resolving to the @keepweb@ service
+# @workbench.${CLUSTER}.${DOMAIN}@
+# @workbench2.${CLUSTER}.${DOMAIN}@
+# @webshell.${CLUSTER}.${DOMAIN}@
+# @shell.${CLUSTER}.${DOMAIN}@
+
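+An illustrative BIND-style zone fragment, assuming cluster id @xarv1@, domain @example.com@, and hypothetical addresses from the documentation range:
+
+<notextile>
+<pre><code>controller.xarv1.example.com.    IN A     203.0.113.10
+download.xarv1.example.com.      IN A     203.0.113.11
+*.collections.xarv1.example.com. IN CNAME download.xarv1.example.com.
+workbench.xarv1.example.com.     IN A     203.0.113.12
+</code></pre>
+</notextile>
+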
+h3. Additional prerequisites when preparing machines to run the installer
+
+# from the account where you are performing the install, passwordless @ssh@ to each machine
+This means the client's public key should be added to @~/.ssh/authorized_keys@ on each node (for example with @ssh-copy-id@, as sketched after this list).
+# passwordless @sudo@ access on the account on each machine you will @ssh@ into
+This usually means adding the account to the @sudo@ group and having a rule like this in @/etc/sudoers.d/arvados_passwordless@ that allows members of group @sudo@ to execute any command without entering a password.
+<pre>%sudo ALL=(ALL:ALL) NOPASSWD:ALL</pre>
 # @git@ installed on each machine
 # port 443 reachable by clients
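+
+A minimal sketch of distributing your public key with @ssh-copy-id@, assuming a hypothetical @admin@ account and the host names used in this guide (repeat for every machine you allocated):
+
+<notextile>
+<pre><code>for h in controller workbench keep0 keep1 shell ; do
+  ssh-copy-id -i ~/.ssh/id_rsa.pub admin@"${h}.xarv1.example.com"
+done
+</code></pre>
+</notextile>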
-# DNS hostnames for each service
-## @controller.${CLUSTER}.${DOMAIN}@
-## @ws.${CLUSTER}.${DOMAIN}@
-## @keep0.${CLUSTER}.${DOMAIN}@
-## @keep1.${CLUSTER}.${DOMAIN}@
-## @keep.${CLUSTER}.${DOMAIN}@
-## @download.${CLUSTER}.${DOMAIN}@
-## @*.collections.${CLUSTER}.${DOMAIN}@  -- important note, this should be a wildcard DNS, going to the keepweb service
-## @workbench.${CLUSTER}.${DOMAIN}@
-## @workbench2.${CLUSTER}.${DOMAIN}@
-## @webshell.${CLUSTER}.${DOMAIN}@
-## @shell.${CLUSTER}.${DOMAIN}@
-
-(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to create EC2 instances, see here for details .":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html
-
-If your infrastructure differs from the setup proposed above (ie, different hostnames, or using an external DB server such as AWS RDS), you can still use the installer, but "additional customization may be necessary":#further_customization .
+
+(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to manage EC2 instances.":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html#IAM
+
+If your infrastructure differs from the setup proposed above (i.e., different hostnames), you can still use the installer, but "additional customization may be necessary":#further_customization .
 
 h2(#download). Download the installer
 
@@ -142,7 +130,7 @@ This can be found wherever you choose to initialize the install files (@~/setup-
 
 # Set @CLUSTER@ to the 5-character cluster identifier (e.g "xarv1")
 # Set @DOMAIN@ to the base DNS domain of the environment, e.g. "example.com"
-# Edit Internal IP settings. Since services share hosts, some hosts are the same.
+# Edit Internal IP settings. Since some services share hosts, some of the IP addresses will be the same.  See the "note about /etc/hosts":#etchosts .
 # Edit @CLUSTER_INT_CIDR@, this should be the CIDR of the private network that Arvados is running on, e.g. the VPC.
 CIDR stands for "Classless Inter-Domain Routing" and describes which portion of the IP address refers to the network.  For example 192.168.3.0/24 means that the first 24 bits are the network (192.168.3) and the last 8 bits are a specific host on that network.
 _AWS Specific: Go to the AWS console and into the VPC service, there is a column in this table view of the VPCs that gives the CIDR for the VPC (IPv4 CIDR)._
@@ -153,62 +141,46 @@ _AWS Specific: Go to the AWS console and into the VPC service, there is a column
   tr -dc A-Za-z0-9 </dev/urandom | head -c 32 ; echo ''
 done
 </code></pre>
-# Set @DATABASE_PASSWORD@ to a random string
+# Set @DATABASE_PASSWORD@ to a random string (unless you "already have a database":#ext-database , in which case set it to that database's password)
    Important! If this contains any non-alphanumeric characters, in particular ampersand ('&'), it is necessary to add backslash quoting.
-   For example, if the password is `Cq&WU<A']p?j`
+   For example, if the password is @Lq&MZ<V']d?j@
    With backslash quoting the special characters it should appear like this in local.params:
-<pre><code>DATABASE_PASSWORD="Cq\&WU\<A\'\]p\?j"</code></pre>
+<pre><code>DATABASE_PASSWORD="Lq\&MZ\<V\'\]d\?j"</code></pre>
 
-h2(#keep). Configure Keep storage
+h3(#etchosts). Note on @/etc/hosts@
 
-The @multi_host/aws@ template uses S3 for storage.  Arvados also supports "filesystem storage":configure-fs-storage.html and "Azure blob storage":configure-azure-blob-storage.html .  Keep storage configuration can be found in in the section @arvados.cluster.Volumes@ of @local_config_dir/pillars/arvados.sls at .
+Because Arvados services are typically accessed by external clients, they are likely to have both a public IP address and an internal IP address.
 
-h3. Object storage in S3 (AWS Specific)
+On cloud providers such as AWS, sending internal traffic to a service's public IP address can incur egress costs and throttling.  Thus it is very important for internal traffic to stay on the internal network.  The installer implements this by updating @/etc/hosts@ on each node to associate each service's hostname with the internal IP address, so that when Arvados services communicate with one another, they always use the internal network address.  This is NOT a substitute for DNS; you still need to set up DNS names for all of the services that have public IP addresses (it does, however, avoid a complex "split-horizon" DNS configuration).
 
-Open @local_config_dir/pillars/arvados.sls@ and edit as follows:
+It is important to be aware of this because if you mistype the IP address for any of the @*_INT_IP@ variables, hosts may unexpectedly fail to communicate with one another.  If this happens, check and, if necessary, edit @/etc/hosts@ on the host that is failing to make an outgoing connection.
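+
+For illustration, the installer-managed entries might look like this (hypothetical internal addresses):
+
+<notextile>
+<pre><code>10.1.16.11  controller.xarv1.example.com
+10.1.16.12  workbench.xarv1.example.com workbench2.xarv1.example.com webshell.xarv1.example.com
+10.1.16.14  keep0.xarv1.example.com
+</code></pre>
+</notextile>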
 
-# In the @arvados.cluster.Volumes@ section, set @Region@ to the appropriate AWS region (e.g. 'us-east-1')
-# Set @Bucket@ to the value of "keepstore role you created earlier":#keep-bucket
-# Set @IAMRole@ to "keepstore role you created earlier":#keep-bucket
-
-{% include 'ssl_config_multi' %}
-
-h2(#create_a_compute_image). Create a compute image
-
-{% include 'branchname' %}
-
-On cloud installations, containers are dispatched in Docker daemons running in the _compute instances_, which need some additional setup.
+h2(#keep). Configure Keep storage
 
-*Start by following "the instructions build a cloud compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html using the "compute image builder script":https://github.com/arvados/arvados/tree/{{ branchname }}/tools/compute-images* .
+The @multi_host/aws@ template uses S3 for storage.  Arvados also supports "filesystem storage":configure-fs-storage.html and "Azure blob storage":configure-azure-blob-storage.html .  Keep storage configuration can be found in the @arvados.cluster.Volumes@ section of @local_config_dir/pillars/arvados.sls@.
 
-Once you have that image created, Open @local_config_dir/pillars/arvados.sls@ and edit as follows (AWS specific settings described here, configuration for Azure is similar):
+h3. Object storage in S3 (AWS Specific)
 
-# In the @arvados.cluster.Containers.CloudVMs@ section:
-## Set @ImageID@ to the AMI produced by Packer
-## Set @Region@ to the appropriate AWS region
-## Set @AdminUsername@ to the admin user account on the image
-## Set the @SecurityGroupIDs@ list to the VPC security group which you set up to allow SSH connections to these nodes
-## Set @SubnetID@ to the value of SubnetId of your VPC
-# Update @arvados.cluster.Containers.DispatchPrivateKey@ and paste the contents of the @~/.ssh/id_dispatcher@ file you generated in an earlier step.
-# Update @arvados.cluster.InstanceTypes@ as necessary. If m5/c5 node types are not available, replace them with m4/c4. You'll need to double check the values for Price and IncludedScratch/AddedScratch for each type that is changed.
+Open @local_config_dir/pillars/arvados.sls@ and edit as follows:
 
-h2(#further_customization). Further customization of the installation (optional)
+# In the @arvados.cluster.Volumes.DriverParameters@ section, set @Region@ to the appropriate AWS region (e.g. 'us-east-1')
 
-If you are installing on AWS and following the naming conventions recommend in this guide, then likely no further configuration is necessary and you can begin installation.
+If you did not "follow the recommended naming scheme":#keep-bucket for either the bucket or role, you'll need to update these parameters as well (a sketch of the result follows):
 
-A couple of common customizations are described here.  Other changes may require editing the Saltstack pillars and states files found in @local_config_dir at .  In particular, @local_config_dir/pillars/arvados.sls@ has the template used to produce the Arvados configuration file that is distributed to all the nodes.
+# Set @Bucket@ to the name of the "keepstore bucket you created earlier":#keep-bucket
+# Set @IAMRole@ to the name of the "keepstore role you created earlier":#keep-bucket
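+
+As a sketch only (the actual template in @arvados.sls@ nests this under @arvados.cluster@ and may differ in detail), the resulting section might resemble:
+
+<notextile>
+<pre><code>    Volumes:
+      xarv1-nyw5e-000000000000000:
+        Driver: S3
+        DriverParameters:
+          Bucket: xarv1-nyw5e-000000000000000-volume
+          IAMRole: xarv1-keepstore-00-iam-role
+          Region: us-east-1
+</code></pre>
+</notextile>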
 
-Any extra salt _state_ files you add under @local_config_dir/states@ will be added to the salt run and applied to the hosts.
+{% include 'ssl_config_multi' %}
 
-h3(#authentication). Using a different authentication provider
+h2(#authentication). Configure your authentication provider (optional, recommended)
 
 By default, the installer will use the "Test" provider, which is a list of usernames and cleartext passwords stored in the Arvados config file.  *This is a low-security configuration and you are strongly advised to configure one of the other "supported authentication methods":setup-login.html* .
 
-h3(#ext-database). Using an external database (optional)
+h2(#ext-database). Using an external database (optional)
 
-Arvados requires a database that is compatible with PostgreSQL 9.5 or later.
+The standard behavior of the installer is to install and configure PostgreSQL for use by Arvados.  You can optionally configure it to use a separately managed database instead.
 
-For example, Arvados is known to work with Amazon Aurora (note: even idle, Arvados constantly accesses the database, so we strongly advise using "provisioned" mode).
+Arvados requires a database that is compatible with PostgreSQL 9.5 or later.  For example, Arvados is known to work with Amazon Aurora (note: even idle, Arvados services will periodically poll the database, so we strongly advise using "provisioned" mode).
 
 # In @local.params@, remove 'database' from the list of roles assigned to the controller node:
 <pre><code>NODES=(
@@ -222,19 +194,45 @@ For example, Arvados is known to work with Amazon Aurora (note: even idle, Arvad
 # In @local.params@, set @DATABASE_PASSWORD@ to the correct value.  "See the previous section describing correct quoting":#localparams
 # In @local_config_dir/pillars/arvados.sls@ you may need to adjust the database name and user.  This can be found in the section @arvados.cluster.database@.
 
+h2(#further_customization). Further customization of the installation (optional)
+
+If you are installing on AWS and have followed all of the naming conventions recommended in this guide, you probably don't need to do any further customization.
+
+If you are installing on a different cloud provider or on HPC, other changes may require editing the Saltstack pillars and states files found in @local_config_dir@.  In particular, @local_config_dir/pillars/arvados.sls@ contains the template (in the @arvados.cluster@ section) used to produce the Arvados configuration file that is distributed to all the nodes.  Consult the "Configuration reference":config.html for a comprehensive list of configuration keys.
+
+Any extra Salt "state" files you add under @local_config_dir/states@ will be added to the Salt run and applied to the hosts.
+
+h2(#create_a_compute_image). Create a compute image
+
+{% include 'branchname' %}
+
+On cloud installations, containers are dispatched in Docker daemons running in the _compute instances_, which need some additional setup.  If you will use an HPC scheduler such as SLURM, you can skip this section.
+
+*Start by following "the instructions to build a cloud compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html using the "compute image builder script":https://github.com/arvados/arvados/tree/{{ branchname }}/tools/compute-images* .
+
+Once you have that image created, open @local_config_dir/pillars/arvados.sls@ and edit as follows (AWS specific settings described here, other cloud providers will have similar settings in their respective configuration sections; a sketch of the result follows this list):
+
+# In the @arvados.cluster.Containers.CloudVMs@ section:
+## Set @ImageID@ to the AMI produced by Packer
+## Set @DriverParameters.Region@ to the appropriate AWS region
+## Set @DriverParameters.AdminUsername@ to the admin user account on the image
+## Set the @DriverParameters.SecurityGroupIDs@ list to the VPC security group which you set up to allow SSH connections to these nodes
+## Set @DriverParameters.SubnetID@ to the value of SubnetId of your VPC
+# Update @arvados.cluster.Containers.DispatchPrivateKey@ and paste the contents of the @~/.ssh/id_dispatcher@ file you generated in an earlier step.
+# Update @arvados.cluster.InstanceTypes@ as necessary.  The example instance types are for AWS; other cloud providers will of course have different instance types with different names and specifications.
+(AWS specific) If m5/c5 node types are not available, replace them with m4/c4. You'll need to double check the values for Price and IncludedScratch/AddedScratch for each type that is changed.
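+
+As a sketch, with hypothetical identifiers, the resulting @CloudVMs@ section might resemble:
+
+<notextile>
+<pre><code>    Containers:
+      CloudVMs:
+        ImageID: ami-0123456789abcdef0     # hypothetical AMI produced by Packer
+        Driver: ec2
+        DriverParameters:
+          Region: us-east-1
+          AdminUsername: admin
+          SecurityGroupIDs:
+            "sg-0123456789abcdef0": {}
+          SubnetID: subnet-0123456789abcdef0
+</code></pre>
+</notextile>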
+
 h2(#installation). Begin installation
 
 At this point, you are ready to run the installer script in deploy mode that will conduct all of the Arvados installation.
 
-Run this in @~/arvados-setup-xarv1@:
+Run this in the @~/arvados-setup-xarv1@ directory:
 
 <pre>
 ./installer.sh deploy
 </pre>
 
-This will deploy all the nodes.  It will take a while and produce a lot of logging.  If it runs into an error, it will stop.
-
-{% include 'install_ca_cert' %}
+This will install and configure Arvados on all the nodes.  It will take a while and produce a lot of logging.  If it runs into an error, it will stop.
 
 h2(#test-install). Confirm the cluster is working
 
@@ -242,9 +240,9 @@ When everything has finished, you can run the diagnostics.
 
 Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.
 
-If you are running the diagnostics from one of the Arvados machines inside the VPC, you want @-internal-client@ .
+If you are running the diagnostics from one of the Arvados machines inside the private network, you want @-internal-client@ .
 
-You are an "external client" if you running the diagnostics from your workstation outside of the VPC.
+You are an "external client" if you are running the diagnostics from your workstation outside of the private network.
 
 <pre>
 ./installer.sh diagnostics (-internal-client|-external-client)
@@ -252,6 +250,8 @@ You are an "external client" if you running the diagnostics from your workstatio
 
 h3(#debugging). Debugging issues
 
+The installer records log files for each deployment.
+
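+As a sketch, to review the most recent deploy log for a node (the installer names them @deploy-<node>-<timestamp>.log@):
+
+<notextile>
+<pre><code>less $(ls -t deploy-controller.xarv1.example.com-*.log | head -1)
+</code></pre>
+</notextile>
+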
 Most service logs go to @/var/log/syslog@.
 
 The logs for Rails API server and for Workbench can be found in
@@ -292,7 +292,7 @@ If this happens, you need to
 
 1. correct the database information
 2. run @./installer.sh deploy xarv1.example.com@ to update the configuration on the API/controller node
-3. On the API/controller server node, run this command to re-run the post-install script, which will set up the database:
+3. Log in to the API/controller server node, then run this command to re-run the post-install script, which will set up the database:
 
 <pre>
 dpkg-reconfigure arvados-api-server
@@ -310,9 +310,9 @@ At this point you should be able to log into the Arvados cluster. The initial UR
 
 https://workbench.${CLUSTER}.${DOMAIN}
 
-If you did not "configure a different authentication provider":#authentication you will be using the "Test" provider, and the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.  It uses the values of @INITIAL_USER@ and @INITIAL_USER_PASSWORD@ the @local.params@ file.
+If you did *not* "configure a different authentication provider":#authentication you will be using the "Test" provider, and the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.  It uses the values of @INITIAL_USER@ and @INITIAL_USER_PASSWORD@ in the @local.params@ file.
 
-If you did configure a different authentication provider, the first user to log in will automatically be given Arvados admin privileges.
+If you *did* configure a different authentication provider, the first user to log in will automatically be given Arvados admin privileges.
 
 h2(#post_install). After the installation
 
@@ -320,6 +320,6 @@ As part of the operation of @installer.sh@, it automatically creates a @git@ rep
 
 As described in "Iterating on config changes":#iterating you may use @installer.sh deploy@ to re-run the Salt to deploy configuration changes and upgrades.  However, be aware that the configuration templates created for you by @installer.sh@ are a snapshot which are not automatically kept up to date.
 
-When deploying upgrades, consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if changes need to be made to the configuration file template in @local_config_dir/pillars/arvados.sls at .
+When deploying upgrades, consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if changes need to be made to the configuration file template in @local_config_dir/pillars/arvados.sls@.  To specify the version to upgrade to, set the @VERSION@ parameter in @local.params@.
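+
+For example, with a hypothetical version string (check the release notes for the actual package version):
+
+<notextile>
+<pre><code>VERSION="2.4.3-1"
+</code></pre>
+</notextile>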
 
 See also "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.
diff --git a/doc/install/salt-single-host.html.textile.liquid b/doc/install/salt-single-host.html.textile.liquid
index ef1633b34..aab2c9968 100644
--- a/doc/install/salt-single-host.html.textile.liquid
+++ b/doc/install/salt-single-host.html.textile.liquid
@@ -85,6 +85,14 @@ arvados: Failed:      0
 </code></pre>
 </notextile>
 
+h2(#ca_root_certificate). Install the CA root certificate (SSL_MODE=self-signed only)
+
+*If you are not using self-signed certificates (you selected SSL_MODE=lets-encrypt or SSL_MODE=bring-your-own), skip this section.*
+
+Arvados uses SSL to encrypt communications. The web interface uses AJAX, which will silently fail if the certificate is not valid or is signed by an unknown Certification Authority.
+
+For this reason, the @arvados-formula@ has a helper state to create a root certificate to authorize Arvados services. The @provision.sh@ script will leave a copy of the generated CA's certificate (@arvados-snakeoil-ca.pem@) in the script's directory so you can add it to your workstation.
+
 {% include 'install_ca_cert' %}
 
 h2(#initial_user). Initial user and login
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index e5ff7be4e..91ade4212 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -90,10 +90,12 @@ deploynode() {
 	exit 1
     fi
 
+    logfile=deploy-${NODE}-$(date -Iseconds).log
+
     if [[ "$NODE" = localhost ]] ; then
-	sudo ./provision.sh --config ${CONFIG_FILE} --roles ${ROLES}
+	sudo ./provision.sh --config ${CONFIG_FILE} --roles ${ROLES} 2>&1 | tee $logfile
     else
-	ssh $DEPLOY_USER@$NODE "cd ${GITTARGET} && sudo ./provision.sh --config ${CONFIG_FILE} --roles ${ROLES}"
+	ssh $DEPLOY_USER@$NODE "cd ${GITTARGET} && sudo ./provision.sh --config ${CONFIG_FILE} --roles ${ROLES}" 2>&1 | tee $logfile
     fi
 }
 
@@ -152,7 +154,9 @@ case "$subcmd" in
 	cp -r config_examples/$SLS $SETUPDIR/${CONFIG_DIR}
 
 	cd $SETUPDIR
-	git add *.sh ${CONFIG_FILE} ${CONFIG_DIR} tests
+	echo '*.log' > .gitignore
+
+	git add *.sh ${CONFIG_FILE} ${CONFIG_DIR} tests .gitignore
 	git commit -m"initial commit"
 
 	echo "setup directory initialized, now go to $SETUPDIR, edit '${CONFIG_FILE}' and '${CONFIG_DIR}' as needed, then run 'installer.sh deploy'"

commit cb9f22b9dd8859cddcbf844352ad83cff1b7194a
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Fri Sep 23 18:10:27 2022 -0400

    19215: Lots more edits
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/_includes/_install_ca_cert.liquid b/doc/_includes/_install_ca_cert.liquid
index 0be6df430..522a63a03 100644
--- a/doc/_includes/_install_ca_cert.liquid
+++ b/doc/_includes/_install_ca_cert.liquid
@@ -6,6 +6,8 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#ca_root_certificate). Install the CA root certificate (SSL_MODE=self-signed only)
 
+*If you are not using self-signed certificates (you selected SSL_MODE=lets-encrypt or SSL_MODE=bring-your-own), skip this section.*
+
 Arvados uses SSL to encrypt communications. The web interface uses AJAX which will silently fail if the certificate is not valid or signed by an unknown Certification Authority.
 
 For this reason, the @arvados-formula@ has a helper state to create a root certificate to authorize Arvados services. The @provision.sh@ script will leave a copy of the generated CA's certificate (@arvados-snakeoil-ca.pem@) in the script's directory so you can add it to your workstation.
diff --git a/doc/_includes/_multi_host_install_custom_certificates.liquid b/doc/_includes/_multi_host_install_custom_certificates.liquid
index 40d24449f..6c0207324 100644
--- a/doc/_includes/_multi_host_install_custom_certificates.liquid
+++ b/doc/_includes/_multi_host_install_custom_certificates.liquid
@@ -42,7 +42,10 @@ Make sure that all the FQDNs that you will use for the public-facing application
 Note: because the installer currently looks for a different certificate file for each service, if you use a single certificate, we recommend creating a symlink for each certificate and key file to the primary certificate and key, e.g.
 
 <notextile>
-<pre><code>ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/keepproxy.crt
+<pre><code>ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/controller.crt
+ln -s xarv1.key ${CUSTOM_CERTS_DIR}/controller.key
+ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/keepproxy.crt
 ln -s xarv1.key ${CUSTOM_CERTS_DIR}/keepproxy.key
+...
 </code></pre>
 </notextile>
diff --git a/doc/install/arvbox.html.textile.liquid b/doc/install/arvbox.html.textile.liquid
index 3c86721c5..378fcc8f8 100644
--- a/doc/install/arvbox.html.textile.liquid
+++ b/doc/install/arvbox.html.textile.liquid
@@ -14,7 +14,7 @@ Arvbox is a Docker-based self-contained development, demonstration and testing e
 h2. Quick start
 
 <pre>
-$ curl -O https://git.arvados.org/arvados.git/blob_plain/refs/heads/main:/tools/arvbox/bin/arvbox
+$ curl -O https://git.arvados.org/arvados.git/blob_plain/refs/heads/{{ branchname }}:/tools/arvbox/bin/arvbox
 $ chmod +x arvbox
 $ ./arvbox start localdemo
 $ ./arvbox root-cert
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index ba24c17ea..ed5ccb9ee 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -185,7 +185,9 @@ For @ClusterID@, fill in your cluster ID. The @VPC@ and @Subnet@ should be confi
 
 h3(#aws-ebs-autoscaler). Autoscaling compute node scratch space
 
-If you want to add the "AWS EBS autoscaler":https://github.com/awslabs/amazon-ebs-autoscale daemon in your images, add the @--aws-ebs-autoscale@ flag to the "the build script":#building. Doing so will make the compute image scratch space scale automatically as needed.
+Arvados supports the "AWS EBS autoscaler":https://github.com/awslabs/amazon-ebs-autoscale daemon.  This feature automatically expands the scratch space on the compute node on demand by 200 GB at a time, up to 5 TB.
+
+If you want to add the daemon to your images, add the @--aws-ebs-autoscale@ flag to "the build script":#building.
 
 The AWS EBS autoscaler daemon will be installed with this configuration:
 
@@ -214,53 +216,9 @@ The AWS EBS autoscaler daemon will be installed with this configuration:
 }
 </code></pre></notextile>
 
-Changing the configuration is left as an exercise for the reader.
-
-Using this feature also requires a few Arvados configuration changes in @config.yml@:
-
-* The @Containers/InstanceTypes@ list should be modified so that all @AddedScratch@ lines are removed, and the @IncludedScratch@ value should be set to a (fictional) high number. This way, the scratch space requirements will be met by all the defined instance type. For example:
-
-<notextile><pre><code>    InstanceTypes:
-      c5large:
-        ProviderType: c5.large
-        VCPUs: 2
-        RAM: 4GiB
-        IncludedScratch: 16TB
-        Price: 0.085
-      m5large:
-        ProviderType: m5.large
-        VCPUs: 2
-        RAM: 8GiB
-        IncludedScratch: 16TB
-        Price: 0.096
-...
-</code></pre></notextile>
-
-* You will also need to create an IAM role in AWS with these permissions:
-
-<notextile><pre><code>{
-    "Version": "2012-10-17",
-    "Statement": [
-        {
-            "Effect": "Allow",
-            "Action": [
-                "ec2:AttachVolume",
-                "ec2:DescribeVolumeStatus",
-                "ec2:DescribeVolumes",
-                "ec2:DescribeTags",
-                "ec2:ModifyInstanceAttribute",
-                "ec2:DescribeVolumeAttribute",
-                "ec2:CreateVolume",
-                "ec2:DeleteVolume",
-                "ec2:CreateTags"
-            ],
-            "Resource": "*"
-        }
-    ]
-}
-</code></pre></notextile>
+Changing the ebs-autoscale configuration is left as an exercise for the reader.
 
-Then, in @config.yml@ set @Containers/CloudVMs/DriverParameters/IAMInstanceProfile@ to the name of the IAM role. This will make @arvados-dispatch-cloud@ pass an IAMInstanceProfile to the compute nodes as they start up, giving them sufficient permissions to attach and grow EBS volumes.
+This feature also requires a few Arvados configuration changes, described in "EBS-Autoscale configuration":install-dispatch-cloud.html#aws-ebs-autoscaler .
 
 h2(#azure). Build an Azure image
 
diff --git a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
index 779071d4a..4f872911b 100644
--- a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
@@ -33,7 +33,7 @@ h2(#update-config). Update config.yml
 
 h3. Configure CloudVMs
 
-Add or update the following portions of your cluster configuration file, @config.yml at . Refer to "config.defaults.yml":{{site.baseurl}}/admin/config.html for information about additional configuration options. The @DispatchPrivateKey@ should be the *private* key generated in "the previous section":install-compute-node.html#sshkeypair.
+Add or update the following portions of your cluster configuration file, @config.yml@. Refer to "config.defaults.yml":{{site.baseurl}}/admin/config.html for information about additional configuration options. The @DispatchPrivateKey@ should be the *private* key generated in "Create a SSH keypair":install-compute-node.html#sshkeypair .
 
 <notextile>
 <pre><code>    Services:
@@ -76,7 +76,7 @@ Add or update the following portions of your cluster configuration file, @config
 
 h3(#GPUsupport). NVIDIA GPU support
 
-To specify instance types with NVIDIA GPUs, you must include an additional @CUDA@ section:
+To specify instance types with NVIDIA GPUs, "the compute image must be built with CUDA support":install-compute-node.html#nvidia , and you must include an additional @CUDA@ section:
 
 <notextile>
 <pre><code>    InstanceTypes:
@@ -95,15 +95,64 @@ To specify instance types with NVIDIA GPUs, you must include an additional @CUDA
 
 The @DriverVersion@ is the version of the CUDA toolkit installed in your compute image (in X.Y format, do not include the patchlevel).  The @HardwareCapability@ is the CUDA compute capability of the GPUs available for this instance type.  The @DeviceCount@ is the number of GPU cores available for this instance type.
 
+h3(#aws-ebs-autoscaler). EBS Autoscale configuration
+
+See "Autoscaling compute node scratch space":install-compute-node.html#aws-ebs-autoscaler for details about compute image configuration.
+
+The @Containers.InstanceTypes@ list should be modified so that all @AddedScratch@ lines are removed, and the @IncludedScratch@ value should be set to 5 TB. This way, the scratch space requirements will be met by all the defined instance types. For example:
+
+<notextile><pre><code>    InstanceTypes:
+      c5large:
+        ProviderType: c5.large
+        VCPUs: 2
+        RAM: 4GiB
+        IncludedScratch: 5TB
+        Price: 0.085
+      m5large:
+        ProviderType: m5.large
+        VCPUs: 2
+        RAM: 8GiB
+        IncludedScratch: 5TB
+        Price: 0.096
+...
+</code></pre></notextile>
+
+You will also need to create an IAM role in AWS with these permissions:
+
+<notextile><pre><code>{
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "ec2:AttachVolume",
+                "ec2:DescribeVolumeStatus",
+                "ec2:DescribeVolumes",
+                "ec2:DescribeTags",
+                "ec2:ModifyInstanceAttribute",
+                "ec2:DescribeVolumeAttribute",
+                "ec2:CreateVolume",
+                "ec2:DeleteVolume",
+                "ec2:CreateTags"
+            ],
+            "Resource": "*"
+        }
+    ]
+}
+</code></pre></notextile>
+
+Then set @Containers.CloudVMs.DriverParameters.IAMInstanceProfile@ to the name of the IAM role. This will make @arvados-dispatch-cloud@ pass an IAM instance profile to the compute nodes when they start up, giving them sufficient permissions to attach and grow EBS volumes.
+
 h3. AWS Credentials for Local Keepstore on Compute node
 
-When @Containers/LocalKeepBlobBuffersPerVCPU@ is non-zero, the compute node will spin up a local Keepstore service for faster storage access. If Keep is backed by S3, the compute node will need to be able to access the S3 bucket.
+When @Containers.LocalKeepBlobBuffersPerVCPU@ is non-zero, the compute node will spin up a local Keepstore service for direct storage access. If Keep is backed by S3, the compute node will need to be able to access the S3 bucket.
+
+If the AWS credentials for S3 access are configured in @config.yml@ (i.e. @Volumes.DriverParameters.AccessKeyID@ and @Volumes.DriverParameters.SecretAccessKey@), these credentials will be made available to the local Keepstore on the compute node to access S3 directly and no further configuration is necessary.
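+
+A sketch of that credentials-based variant, using AWS's well-known placeholder keys:
+
+<notextile>
+<pre><code>    Volumes:
+      xarv1-nyw5e-000000000000000:
+        DriverParameters:
+          AccessKeyID: AKIAIOSFODNN7EXAMPLE
+          SecretAccessKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+</code></pre>
+</notextile>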
 
-If the AWS credentials for S3 access are configured in @config.yml@ (i.e. @Volumes/DriverParameters/AccessKeyID@ and @Volumes/DriverParameters/SecretAccessKey@), these credentials will be made available to the local Keepstore on the compute node to access S3 directly and no further configuration is necessary.
+Alternatively, if an IAM role is configured in @config.yml@ (i.e. @Volumes.DriverParameters.IAMRole@), the name of an instance profile that corresponds to this role ("often identical to the name of the IAM role":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile) must be configured in the @CloudVMs.DriverParameters.IAMInstanceProfile@ parameter.
 
-Alternatively, if an IAM role is configured in @config.yml@ (i.e. @Volumes/DriverParameters/IAMRole@), the name of an instance profile that corresponds to this role ("often identical to the name of the IAM role":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile) must be configured in the @CloudVMs/DriverParameters/IAMInstanceProfile@ parameter.
+*If you are also using the EBS Autoscale feature, the role in @IAMInstanceProfile@ must have both EC2 and S3 permissions.*
 
-Finally, if @config.yml@ does not have @Volumes/DriverParameters/AccessKeyID@, @Volumes/DriverParameters/SecretAccessKey@ or @Volumes/DriverParameters/IAMRole@ defined, Keepstore uses the IAM role attached to the node, whatever it may be called. The @CloudVMs/DriverParameters/IAMInstanceProfile@ parameter must then still be configured with the name of a profile whose IAM role has permission to access the S3 bucket(s). That way, @arvados-dispatch-cloud@ can attach the IAM role to the compute node as it is created.
+Finally, if @config.yml@ does not have @Volumes.DriverParameters.AccessKeyID@, @Volumes.DriverParameters.SecretAccessKey@ or @Volumes.DriverParameters.IAMRole@ defined, Keepstore uses the IAM role attached to the node, whatever it may be called. The @CloudVMs.DriverParameters.IAMInstanceProfile@ parameter must then still be configured with the name of a profile whose IAM role has permission to access the S3 bucket(s). That way, @arvados-dispatch-cloud@ can attach the IAM role to the compute node as it is created.
 
 h3. Minimal configuration example for Amazon EC2
 
@@ -130,14 +179,13 @@ The <span class="userinput">ImageID</span> value is the compute node image that
 </code></pre>
 </notextile>
 
-h3(#IAM). Example IAM policy
+h3(#IAM). Example IAM policy for cloud dispatcher
 
 Example policy for the IAM role used by the cloud dispatcher:
 
 <notextile>
 <pre>
 {
-    "Version": "2012-10-17",
     "Id": "arvados-dispatch-cloud policy",
     "Statement": [
         {
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 4be657f1e..fc432cb37 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 # "Introduction":#introduction
 # "Prerequisites and planning":#prerequisites
-# "Hosts":#hosts
+# "Required hosts":#hosts
 # "Download the installer":#download
 # "Initialize the installer":#copy_config
 # "Edit local.params":#localparams
@@ -23,7 +23,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 # "Create a compute image":#create_a_compute_image
 # "Further customization of the installation":#further_customization
 # "Begin installation":#installation
-## "Run diagnostics to confirming the cluster is working":#test-install
+# "Confirm the cluster is working":#test-install
 ## "Debugging issues":#debugging
 ## "Iterating on config changes":#iterating
 ## "Common problems and solutions":#common-problems
@@ -55,23 +55,36 @@ When you do so, you need to configure a couple of additional things:
 # You should set up a "security group which allows SSH access (port 22)":https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html
 # Make sure to add a "VPC S3 endpoint":https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html
 
-h3. S3 Bucket (AWS specific)
+h3(#keep-bucket). S3 Bucket (AWS specific)
 
 We recommend "creating an S3 bucket":https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html for data storage named @${CLUSTER}-nyw5e-000000000000000-volume@
 
-Then create an IAM role called @${CLUSTER}-keepstore-00-iam-role@ which has "permission to read and write the bucket":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html
-
-h3. Other IAM Roles (AWS specific)
-
+Then create an IAM role called @${CLUSTER}-keepstore-00-iam-role@ which has "permission to read and write the bucket":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html .  Here is an example policy:
 
+<notextile>
+<pre>
+{
+    "Id": "arvados-keepstore policy",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                  "s3:*"
+            ],
+            "Resource": "arn:aws:s3:::xarv1-nyw5e-000000000000000-volume"
+        }
+    ]
+}
+</pre>
+</notextile>
 
-h2(#hosts). Hosts
+h2(#hosts). Required hosts
 
 You will need to allocate several hosts (physical or virtual machines) for the fixed infrastructure of the Arvados cluster.  These machines should have at least 2 cores and 8 GiB of RAM, running a supported Linux distribution.
 
 {% include 'supportedlinux' %}
 
-Allocate these as appropriate for your site.  On AWS you may choose to do it manually with the AWS console, or using a DevOps tool such as CloudFormation or Terraform.
+Allocate the following hosts as appropriate for your site.  On AWS you may choose to do it manually with the AWS console, or using a DevOps tool such as CloudFormation or Terraform.
 
 The installer will set up the Arvados services on your machines.  Here is the default assignment of services to machines:
 
@@ -113,9 +126,9 @@ Additional prerequisites when preparing machines to run the installer:
 ## @webshell.${CLUSTER}.${DOMAIN}@
 ## @shell.${CLUSTER}.${DOMAIN}@
 
-(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to create EC2 instances, see here for details .":https://doc.arvados.org/v2.4/install/crunch2-cloud/install-dispatch-cloud.html#IAM
+(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to create EC2 instances; see here for details":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html .
 
-If your infrastructure differs from the setup proposed above (ie, different hostnames, or using AWS RDS or an existing DB server), you can still use the installer, but additional customization will be necessary.
+If your infrastructure differs from the setup proposed above (i.e., different hostnames, or using an external DB server such as AWS RDS), you can still use the installer, but "additional customization may be necessary":#further_customization .
 
 h2(#download). Download the installer
 
@@ -148,15 +161,15 @@ done
 
 h2(#keep). Configure Keep storage
 
-The @multi_host/aws@ template uses S3 for storage.  Arvados also supports "filesystem storage":configure-fs-storage.html and "Azure blob storage":configure-azure-blob-storage.html .  Keep storage configuration can be found in @local_config_dir/pillars/arvados.sls@ in the section @arvados.cluster.Volumes@.
+The @multi_host/aws@ template uses S3 for storage.  Arvados also supports "filesystem storage":configure-fs-storage.html and "Azure blob storage":configure-azure-blob-storage.html .  Keep storage configuration can be found in the section @arvados.cluster.Volumes@ of @local_config_dir/pillars/arvados.sls@.
 
 h3. Object storage in S3 (AWS Specific)
 
 Open @local_config_dir/pillars/arvados.sls@ and edit as follows:
 
 # In the @arvados.cluster.Volumes@ section, set @Region@ to the appropriate AWS region (e.g. 'us-east-1')
-# Set @IAMRole@ to the name of the `KeepstoreRole` generated by CloudFormation.  Just use the part after the '/' (not the arn:aws:iam.... stuff at the beginning).
-# Set @Bucket@ to the value of `KeepBucket1`
+# Set @Bucket@ to the name of the "S3 bucket you created earlier":#keep-bucket
+# Set @IAMRole@ to the name of the "keepstore role you created earlier":#keep-bucket (see the sketch below)
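+
+A sketch of the resulting pillar section (region, bucket, and role values are illustrative; check the surrounding template for the exact layout):
+
+<notextile>
+<pre><code>arvados:
+  cluster:
+    Volumes:
+      xarv1-nyw5e-000000000000000:
+        Driver: S3
+        DriverParameters:
+          Region: us-east-1
+          Bucket: xarv1-nyw5e-000000000000000-volume
+          IAMRole: xarv1-keepstore-00-iam-role
+</code></pre>
+</notextile>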
 
 {% include 'ssl_config_multi' %}
 
@@ -164,34 +177,56 @@ h2(#create_a_compute_image). Create a compute image
 
 {% include 'branchname' %}
 
-On cloud installations, containers are dispatched in Docker daemons running in the <i>compute instances</i>, which need some special setup.  Follow "the instructions build a cloud compute node image":https://doc.arvados.org/install/crunch2-cloud/install-compute-node.html using the "compute image builder script":https://github.com/arvados/arvados/tree/{{ branchname }}/tools/compute-images  .
+On cloud installations, containers are dispatched in Docker daemons running in the _compute instances_, which need some additional setup.
+
+*Start by following "the instructions build a cloud compute node image":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html using the "compute image builder script":https://github.com/arvados/arvados/tree/{{ branchname }}/tools/compute-images* .
 
 Once you have that image created, open @local_config_dir/pillars/arvados.sls@ and edit as follows (AWS-specific settings are described here; configuration for Azure is similar):
 
 # In the @arvados.cluster.Containers.CloudVMs@ section:
-## Set @ImageID@ to the AMI output from Packer
+## Set @ImageID@ to the AMI produced by Packer
 ## Set @Region@ to the appropriate AWS region
 ## Set @AdminUsername@ to the admin user account on the image
 ## Set the @SecurityGroupIDs@ list to the VPC security group which you set up to allow SSH connections to these nodes
 ## Set @SubnetID@ to the value of SubnetId of your VPC
 # Update @arvados.cluster.Containers.DispatchPrivateKey@ and paste the contents of the @~/.ssh/id_dispatcher@ file you generated in an earlier step.
-# Update @arvados.cluster.InstanceTypes@ as necessary. If t3 and m5/c5 node types are not available, replace them with t2 and m4/c4. You'll need to double check the values for Price and IncludedScratch/AddedScratch for each type that is changed.
+# Update @arvados.cluster.InstanceTypes@ as necessary. If m5/c5 node types are not available, replace them with m4/c4. You'll need to double-check the values for Price and IncludedScratch/AddedScratch for each type that is changed.  (A sketch of the assembled @CloudVMs@ section follows this list.)
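+
+Assembled, the @CloudVMs@ part of the edit might look like this sketch (all values are placeholders; check the surrounding template in @arvados.sls@ for the exact key layout):
+
+<notextile>
+<pre><code>arvados:
+  cluster:
+    Containers:
+      CloudVMs:
+        ImageID: ami-0123456789abcdef0    # AMI produced by Packer
+        DriverParameters:
+          Region: us-east-1
+          AdminUsername: admin
+          SecurityGroupIDs:
+            - sg-0123456789abcdef0        # allows SSH to the compute nodes
+          SubnetID: subnet-0123456789abcdef0
+</code></pre>
+</notextile>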
 
-h2(#further_customization). Further customization of the installation
+h2(#further_customization). Further customization of the installation (optional)
 
 If you are installing on AWS and following the naming conventions recommended in this guide, then likely no further configuration is necessary and you can begin installation.
 
-If your infrastructure differs from the setup proposed above (ie, using AWS RDS or an existing DB server), you can still use the installer, but additional customization will be necessary.
+A couple of common customizations are described here.  Other changes may require editing the Saltstack pillars and states files found in @local_config_dir@.  In particular, @local_config_dir/pillars/arvados.sls@ has the template used to produce the Arvados configuration file that is distributed to all the nodes.
+
+Any extra salt _state_ files you add under @local_config_dir/states@ will be added to the salt run and applied to the hosts.
 
-This is done by editing the Saltstack pillars and states files found in @local_config_dir at .  In particular, @local_config_dir/pillars/arvados.sls@ has the template used to produce the Arvados configuration file that is distributed to all the nodes.
+h3(#authentication). Using a different authentication provider
 
-Any extra salt <i>state</i> file you add under @local_config_dir/states@ will be added to the salt run and applied to the hosts.
+By default, the installer will use the "Test" provider, which is a list of usernames and cleartext passwords stored in the Arvados config file.  *This is a low-security configuration and you are strongly advised to configure one of the other "supported authentication methods":setup-login.html* .
+
+h3(#ext-database). Using an external database (optional)
+
+Arvados requires a database that is compatible with PostgreSQL 9.5 or later.
+
+For example, Arvados is known to work with Amazon Aurora (note: even idle, Arvados constantly accesses the database, so we strongly advise using "provisioned" mode).
+
+# In @local.params@, remove 'database' from the list of roles assigned to the controller node:
+<pre><code>NODES=(
+  [controller.${CLUSTER}.${DOMAIN}]=api,controller,websocket,dispatcher,keepbalance
+  ...
+)
+</code></pre>
+# In @local.params@, set @DATABASE_INT_IP@ to the database endpoint (can be a hostname, does not have to be an IP address).
+<pre><code>DATABASE_INT_IP=...
+</code></pre>
+# In @local.params@, set @DATABASE_PASSWORD@ to the correct value.  "See the previous section describing correct quoting":#localparams
+# In @local_config_dir/pillars/arvados.sls@ you may need to adjust the database name and user.  This can be found in the section @arvados.cluster.database@, as sketched below.
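+
+A sketch of that section (the exact key names in your pillar may differ; the values shown are illustrative):
+
+<notextile>
+<pre><code>arvados:
+  cluster:
+    database:
+      host: database.xarv1.example.com   # or your RDS/Aurora endpoint
+      name: arvados
+      user: arvados
+</code></pre>
+</notextile>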
 
 h2(#installation). Begin installation
 
 At this point, you are ready to run the installer script in deploy mode, which will carry out the complete Arvados installation.
 
-Run this in ~/arvados-setup-xarv1:
+Run this in @~/arvados-setup-xarv1@:
 
 <pre>
 ./installer.sh deploy
@@ -199,13 +234,15 @@ Run this in ~/arvados-setup-xarv1:
 
 This will deploy all the nodes.  It will take a while and produce a lot of logging.  If it runs into an error, it will stop.
 
-h3(#test-install). Run diagnostics to confirm the cluster is working
+{% include 'install_ca_cert' %}
+
+h2(#test-install). Confirm the cluster is working
 
 When everything has finished, you can run the diagnostics.
 
 Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.
 
-You are probably an "internal client" if you are running the diagnostics from one of the Arvados machines inside the VPC.
+If you are running the diagnostics from one of the Arvados machines inside the VPC, you want @-internal-client@.
 
 You are an "external client" if you are running the diagnostics from your workstation outside of the VPC.
 
@@ -215,7 +252,7 @@ You are an "external client" if you running the diagnostics from your workstatio
 
 h3(#debugging). Debugging issues
 
-Most service logs go to @/var/log/syslog@
+Most service logs go to @/var/log/syslog@.
 
 The logs for Rails API server and for Workbench can be found in
 
@@ -225,7 +262,7 @@ and
 
 on the appropriate instances.
 
-Workbench2 is a client-side Javascript application, if it having trouble loading, check the browser's developer console.
+Workbench 2 is a client-side JavaScript application.  If you are having trouble loading Workbench 2, check the browser's developer console (this can be found in "Tools → Developer Tools").
 
 h3(#iterating). Iterating on config changes
 
@@ -241,10 +278,6 @@ However, once you have a final configuration, you should run a full deploy to en
 
 h3(#common-problems). Common problems and solutions
 
-h4. Missing ENA support (AWS Specific)
-
-If the AMI wasn't built with ENA (extended networking) support and the instance type requires it, it'll fail to start.  You'll see an error in syslog on the node that runs @arvados-dispatch-cloud@.  The solution is to build a new AMI with --aws-ena-support true
-
 h4. PG::UndefinedTable: ERROR:  relation \"api_clients\" does not exist
 
 The arvados-api-server package sets up the database as a post-install script.  If the database host or password wasn't set correctly (or quoted correctly) at the time that package is installed, it won't be able to set up the database.
@@ -267,21 +300,19 @@ dpkg-reconfigure arvados-api-server
 
 4. Re-run @./installer.sh deploy@ to synchronize everything, so that the install steps that need to contact the API server run successfully.
 
-{% include 'install_ca_cert' %}
+h4. Missing ENA support (AWS Specific)
+
+If the AMI wasn't built with ENA (extended networking) support and the instance type requires it, it'll fail to start.  You'll see an error in syslog on the node that runs @arvados-dispatch-cloud@.  The solution is to build a new AMI with @--aws-ena-support true@.
 
 h2(#initial_user). Initial user and login
 
 At this point you should be able to log into the Arvados cluster. The initial URL will be
 
-* https://workbench.${CLUSTER}.${DOMAIN}
-
-By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
+https://workbench.${CLUSTER}.${DOMAIN}
 
-Assuming you didn't change these values in the @local.params@ file, the initial credentials are:
+If you did not "configure a different authentication provider":#authentication you will be using the "Test" provider, and the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.  It uses the values of @INITIAL_USER@ and @INITIAL_USER_PASSWORD@ the @local.params@ file.
 
-* User: 'admin'
-* Password: 'password'
-* Email: 'admin@${CLUSTER}.${DOMAIN}'
+If you did configure a different authentication provider, the first user to log in will automatically be given Arvados admin privileges.
 
 h2(#post_install). After the installation
 
@@ -291,4 +322,4 @@ As described in "Iterating on config changes":#iterating you may use @installer.
 
 When deploying upgrades, consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if changes need to be made to the configuration file template in @local_config_dir/pillars/arvados.sls@.
 
-See "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.
+See also "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 5e7ae7ca1..bfcfcfc68 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -64,7 +64,7 @@ INITIAL_USER="admin"
 # If not specified, the initial user email will be composed as
 # INITIAL_USER@CLUSTER.DOMAIN
 INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
-INITIAL_USER_PASSWORD="password"
+INITIAL_USER_PASSWORD="fixmepassword"
 
 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
 BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters

commit 9e5bb48b50f1ccfaab1939d6016f1b21b0802334
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Fri Sep 23 15:34:32 2022 -0400

    19215: Install doc update work in progress checkpoint
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/_config.yml b/doc/_config.yml
index 148e1a166..56ae1bf19 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -209,13 +209,13 @@ navbar:
       - install/index.html.textile.liquid
     - Docker quick start:
       - install/arvbox.html.textile.liquid
-    - Installation with Salt:
+    - Arvados Installer:
       - install/salt-single-host.html.textile.liquid
       - install/salt-multi-host.html.textile.liquid
-    - Arvados on Kubernetes:
-      - install/arvados-on-kubernetes.html.textile.liquid
-      - install/arvados-on-kubernetes-minikube.html.textile.liquid
-      - install/arvados-on-kubernetes-GKE.html.textile.liquid
+#    - Arvados on Kubernetes:
+#      - install/arvados-on-kubernetes.html.textile.liquid
+#      - install/arvados-on-kubernetes-minikube.html.textile.liquid
+#      - install/arvados-on-kubernetes-GKE.html.textile.liquid
     - Manual installation:
       - install/install-manual-prerequisites.html.textile.liquid
       - install/packages.html.textile.liquid
diff --git a/doc/_includes/_install_ca_cert.liquid b/doc/_includes/_install_ca_cert.liquid
index 35d5826de..0be6df430 100644
--- a/doc/_includes/_install_ca_cert.liquid
+++ b/doc/_includes/_install_ca_cert.liquid
@@ -10,29 +10,54 @@ Arvados uses SSL to encrypt communications. The web interface uses AJAX which wi
 
 For this reason, the @arvados-formula@ has a helper state to create a root certificate to authorize Arvados services. The @provision.sh@ script will leave a copy of the generated CA's certificate (@arvados-snakeoil-ca.pem@) in the script's directory so you can add it to your workstation.
 
+h3. Web Browser
+
 Installing the root certificate into your web browser will prevent security errors when accessing Arvados services with your web browser.
 
-# Go to the certificate manager in your browser.
-#* In Chrome, this can be found under "Settings → Advanced → Manage Certificates" or by entering @chrome://settings/certificates@ in the URL bar.
-#* In Firefox, this can be found under "Preferences → Privacy & Security" or entering @about:preferences#privacy@ in the URL bar and then choosing "View Certificates...".
-# Select the "Authorities" tab, then press the "Import" button.  Choose @arvados-snakeoil-ca.pem@
+h4. Chrome
+
+# Go to "Settings → Privacy and Security → Security → Manage Certificates" or enter @chrome://settings/certificates@ in the URL bar.
+# *Click on the "Authorities" tab*  (it is not selected by default)
+# Click on the "Import" button
+# Choose @arvados-snakeoil-ca.pem@
+# Tick the checkbox next to "Trust this certificate for identifying websites"
+# Hit OK
+# The certificate should appear in the list of Authorities under "Arvados"
+
+h4. Firefox
+
+# Go to "Preferences → Privacy & Security" or enter @about:preferences#privacy@ in the URL bar
+# Scroll down to the *Certificates* section
+# Click on the button "View Certificates...".
+# Make sure the "Authorities" tab is selected
+# Press the "Import..." button.
+# Choose @arvados-snakeoil-ca.pem@
+# Tick the checkbox next to "Trust this CA to identify websites"
+# Hit OK
+# The certificate should appear in the list of Authorities under "Arvados"
+
+h4. Other browsers (Safari, etc)
+
+The process will be similar to that of Chrome and Firefox, but the exact user interface will be different.  If you can't figure it out, try searching for "how do I install a custom certificate authority in <my browser>".
+
+h3. Installation on Linux OS certificate storage
 
-The certificate will be added under the "Arvados Formula".
+To access your Arvados instance using command line clients (such as @arv-get@ and @arv-put@) without security errors, install the certificate into the OS certificate storage.
 
-To access your Arvados instance using command line clients (such as arv-get and arv-put) without security errors, install the certificate into the OS certificate storage.
+h4. Debian/Ubuntu
 
-* On Debian/Ubuntu:
+*Important* the certificate file added to @ca-certificates@ must have the extension @.crt@ or it won't be recognized.
 
 <notextile>
-<pre><code>cp arvados-root-cert.pem /usr/local/share/ca-certificates/
+<pre><code>cp arvados-snakeoil-ca.pem /usr/local/share/ca-certificates/arvados-snakeoil-ca.crt
 /usr/sbin/update-ca-certificates
 </code></pre>
 </notextile>
 
-* On CentOS:
+h4. CentOS
 
 <notextile>
-<pre><code>cp arvados-root-cert.pem /etc/pki/ca-trust/source/anchors/
+<pre><code>cp arvados-snakeoil-ca.pem /etc/pki/ca-trust/source/anchors/
 /usr/bin/update-ca-trust
 </code></pre>
 </notextile>
diff --git a/doc/_includes/_multi_host_install_custom_certificates.liquid b/doc/_includes/_multi_host_install_custom_certificates.liquid
index 7672372af..40d24449f 100644
--- a/doc/_includes/_multi_host_install_custom_certificates.liquid
+++ b/doc/_includes/_multi_host_install_custom_certificates.liquid
@@ -4,6 +4,18 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
+You will need certificates for each DNS name and DNS wildcard previously described in the "Hosts":#hosts section.
+
+To simplify certificate management, we recommend creating a single certificate with all of the hostnames, or creating a wildcard certificate that covers all possible hostnames (with the following patterns in subjectAltName):
+
+<pre>
+xarv1.example.com
+*.xarv1.example.com
+*.collections.xarv1.example.com
+</pre>
+
+(Replace @xarv1.example.com@ with your own @${CLUSTER}.${DOMAIN}@)
+
 Copy your certificates to the directory specified with the variable @CUSTOM_CERTS_DIR@ in the remote directory where you copied the @provision.sh@ script. The provision script will find the certificates there.
 
 The script expects cert/key files with these basenames (matching the role except for <i>keepweb</i>, which is split in both <i>download / collections</i>):
@@ -27,4 +39,10 @@ ${CUSTOM_CERTS_DIR}/keepproxy.key
 
 Make sure that all the FQDNs that you will use for the public-facing applications (API/controller, Workbench, Keepproxy/Keepweb) are reachable.
 
-It may be easier to create a single certificate wh
\ No newline at end of file
+Note: because the installer currently looks for a different certificate file for each service, if you use a single certificate, we recommend creating a symlink for each certificate and key file to the primary certificate and key, e.g.
+
+<notextile>
+<pre><code>ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/keepproxy.crt
+ln -s xarv1.key ${CUSTOM_CERTS_DIR}/keepproxy.key
+</code></pre>
+</notextile>
diff --git a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
index 2a7e10590..779071d4a 100644
--- a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
@@ -130,6 +130,8 @@ The <span class="userinput">ImageID</span> value is the compute node image that
 </code></pre>
 </notextile>
 
+h3(#IAM). Example IAM policy
+
 Example policy for the IAM role used by the cloud dispatcher:
 
 <notextile>
@@ -141,13 +143,19 @@ Example policy for the IAM role used by the cloud dispatcher:
         {
             "Effect": "Allow",
             "Action": [
-                "iam:PassRole",
-                "ec2:DescribeKeyPairs",
-                "ec2:ImportKeyPair",
-                "ec2:RunInstances",
-                "ec2:DescribeInstances",
-                "ec2:CreateTags",
-                "ec2:TerminateInstances"
+                  "ec2:CreateTags",
+                  "ec2:Describe*",
+                  "ec2:CreateImage",
+                  "ec2:CreateKeyPair",
+                  "ec2:ImportKeyPair",
+                  "ec2:DeleteKeyPair",
+                  "ec2:RunInstances",
+                  "ec2:StopInstances",
+                  "ec2:TerminateInstances",
+                  "ec2:ModifyInstanceAttribute",
+                  "ec2:CreateSecurityGroup",
+                  "ec2:DeleteSecurityGroup",
+                  "iam:PassRole"
             ],
             "Resource": "*"
         }
diff --git a/doc/install/index.html.textile.liquid b/doc/install/index.html.textile.liquid
index 2bd9710f7..eebc0ab7d 100644
--- a/doc/install/index.html.textile.liquid
+++ b/doc/install/index.html.textile.liquid
@@ -20,15 +20,12 @@ Arvados components can be installed and configured in a number of different ways
 <div class="offset1">
 table(table table-bordered table-condensed).
 |||\5=. Appropriate for|
-||_. Setup difficulty|_. Multiuser/networked access|_. Workflow Development and Testing|_. Large Scale Production|_. Development of Arvados|_. Arvados Evaluation|
-|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|no|yes|no|yes|yes|
-|"Installation with Salt":salt-single-host.html (single host)|Easy|no|yes|no|yes|yes|
-|"Installation with Salt":salt-multi-host.html (multi host)|Moderate|yes|yes|yes|yes|yes|
-|"Arvados on Kubernetes":arvados-on-kubernetes.html|Easy ^1^|yes|yes ^2^|no ^2^|no|yes|
-|"Manual installation":install-manual-prerequisites.html|Hard|yes|yes|yes|no|no|
-|"Cluster Operation Subscription supported by Curii":mailto:info at curii.com|N/A ^3^|yes|yes|yes|yes|yes|
+||_. Setup difficulty|_. Arvados Evaluation|_. Workflow Development|_. Production at Scale|
+|"Arvados-in-a-box":arvbox.html (arvbox)|Easy|yes|limited|no|
+|"Arados Installer":salt-single-host.html (single host)|Easy|yes|limited|no|
+|"Arados Installer":salt-multi-host.html (multi host)|Moderate|yes|yes|yes|
+|"Manual installation":install-manual-prerequisites.html|Difficult|yes|yes|yes|
+|"Cluster Operation Subscription supported by Curii":https://curii.com|N/A ^1^|yes|yes|yes|
 </div>
 
-* ^1^ Assumes a Kubernetes cluster is available
-* ^2^ Arvados on Kubernetes is under development and not yet ready for production use
-* ^3^ No user installation necessary, Curii run and managed
+* ^1^ No user installation necessary, Curii run and managed
diff --git a/doc/install/install-manual-prerequisites.html.textile.liquid b/doc/install/install-manual-prerequisites.html.textile.liquid
index 21b3871e0..784d712f1 100644
--- a/doc/install/install-manual-prerequisites.html.textile.liquid
+++ b/doc/install/install-manual-prerequisites.html.textile.liquid
@@ -24,24 +24,10 @@ The Arvados storage subsystem is called "keep".  The compute subsystem is called
 # "Arvados Cluster ID":#clusterid
 # "DNS and TLS":#dnstls
 
+
 h2(#supportedlinux). Supported GNU/Linux distributions
 
-table(table table-bordered table-condensed).
-|_. Distribution|_. State|_. Last supported Arvados version|
-|CentOS 7|Supported|Latest|
-|Debian 11 ("bullseye")|Supported|Latest|
-|Debian 10 ("buster")|Supported|Latest|
-|Ubuntu 20.04 ("focal")|Supported|Latest|
-|Ubuntu 18.04 ("bionic")|Supported|Latest|
-|Ubuntu 16.04 ("xenial")|EOL|2.1.2|
-|Debian 9 ("stretch")|EOL|2.1.2|
-|Debian 8 ("jessie")|EOL|1.4.3|
-|Ubuntu 14.04 ("trusty")|EOL|1.4.3|
-|Ubuntu 12.04 ("precise")|EOL|8ed7b6dd5d4df93a3f37096afe6d6f81c2a7ef6e (2017-05-03)|
-|Debian 7 ("wheezy")|EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
-|CentOS 6 |EOL|997479d1408139e96ecdb42a60b4f727f814f6c9 (2016-12-28)|
-
-Arvados packages are published for current Debian releases (until the EOL date), current Ubuntu LTS releases (until the end of standard support), and the latest version of CentOS.
+{% include 'supportedlinux' %}
 
 h2(#components). Choosing which components to install
 
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 1a70d46ef..4be657f1e 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -11,23 +11,29 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 # "Introduction":#introduction
 # "Prerequisites and planning":#prerequisites
+# "Hosts":#hosts
 # "Download the installer":#download
 # "Initialize the installer":#copy_config
+# "Edit local.params":#localparams
+# "Configure Keep storage":#keep
 # "Choose the SSL configuration":#certificates
 ## "Using a self-signed certificates":#self-signed
 ## "Using a Let's Encrypt certificates":#lets-encrypt
 ## "Bring your own certificates":#bring-your-own
 # "Create a compute image":#create_a_compute_image
-# "Further customization of the installation (modifying the salt pillars and states)":#further_customization
+# "Further customization of the installation":#further_customization
 # "Begin installation":#installation
+## "Run diagnostics to confirming the cluster is working":#test-install
+## "Debugging issues":#debugging
+## "Iterating on config changes":#iterating
+## "Common problems and solutions":#common-problems
 # "Install the CA root certificate":#ca_root_certificate
 # "Initial user and login":#initial_user
-# "Test the installed cluster running a simple workflow":#test_install
 # "After the installation":#post_install
 
 h2(#introduction). Introduction
 
-This multi host installer is the recommendend way to set up a production Arvados cluster.  These instructions include speciic details for installing on Amazon Web Services (AWS), which are marked as "AWS specific".  However with additional customization the installer can be used as a template for deployment on other cloud provider or HPC systems.
+This multi host installer is the recommended way to set up a production Arvados cluster.  These instructions include specific details for installing on Amazon Web Services (AWS), which are marked as "AWS specific".  However, with additional customization, the installer can be used as a template for deployment on other cloud providers or HPC systems.
 
 h2(#prerequisites). Prerequisites and planning
 
@@ -55,9 +61,15 @@ We recommend "creating an S3 bucket":https://docs.aws.amazon.com/AmazonS3/latest
 
 Then create an IAM role called @${CLUSTER}-keepstore-00-iam-role@ which has "permission to read and write the bucket":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html
 
-h3. Machines
+h3. Other IAM Roles (AWS specific)
 
-You will need to allocate (virtual) machines for the fixed infrastructure of the Arvados cluster.  These machines should have at least 2 cores and 8 GiB of RAM, running a "supported Arvados distribution":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux
+
+
+h2(#hosts). Hosts
+
+You will need to allocate several hosts (physical or virtual machines) for the fixed infrastructure of the Arvados cluster.  These machines should have at least 2 cores and 8 GiB of RAM, running a supported Linux distribution.
+
+{% include 'supportedlinux' %}
 
 Allocate these as appropriate for your site.  On AWS you may choose to do it manually with the AWS console, or using a DevOps tool such as CloudFormation or Terraform.
 
@@ -101,7 +113,7 @@ Additional prerequisites when preparing machines to run the installer:
 ## @webshell.${CLUSTER}.${DOMAIN}@
 ## @shell.${CLUSTER}.${DOMAIN}@
 
-(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to create EC2 instances, see here for details .":https://doc.arvados.org/v2.4/install/crunch2-cloud/install-dispatch-cloud.html
+(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to create EC2 instances, see here for details .":https://doc.arvados.org/v2.4/install/crunch2-cloud/install-dispatch-cloud.html#IAM
 
 If your infrastructure differs from the setup proposed above (ie, different hostnames, or using AWS RDS or an existing DB server), you can still use the installer, but additional customization will be necessary.
 
@@ -111,16 +123,16 @@ h2(#download). Download the installer
 {% assign config_examples_src = 'multi_host/aws'%}
 {% include 'download_installer' %}
 
-h2. Edit @local.params@
+h2(#localparams). Edit @local.params@
 
 This can be found wherever you choose to initialize the install files (@~/setup-arvados-xarv1@ in these examples).
 
 # Set @CLUSTER@ to the 5-character cluster identifier (e.g "xarv1")
 # Set @DOMAIN@ to the base DNS domain of the environment, e.g. "example.com"
 # Edit Internal IP settings. Since services share hosts, some entries will use the same IP address.
-# Edit @CLUSTER_INT_CIDR@, this should be the CIDR of the private network that Arvados is running on, e.g. the VPC
-	AWS Specific: Go to the AWS console and into the VPC service, there is a column in
-	this table view of the VPCs that gives the CIDR for the VPC (IPv4 CIDR).
+# Edit @CLUSTER_INT_CIDR@; this should be the CIDR of the private network that Arvados is running on, e.g. the VPC.
+CIDR stands for "Classless Inter-Domain Routing" and describes which portion of the IP address refers to the network.  For example, 192.168.3.0/24 means that the first 24 bits are the network (192.168.3) and the last 8 bits identify a specific host on that network.
+_AWS Specific: Go to the AWS console and into the VPC service, there is a column in this table view of the VPCs that gives the CIDR for the VPC (IPv4 CIDR)._
 # Set @INITIAL_USER_EMAIL@ to your email address, as you will be the first admin user of the system.
 # Set each @KEY@ / @TOKEN@ to a random string
 	Here's an easy way to create five random tokens:
@@ -134,13 +146,19 @@ done
    With backslash quoting the special characters it should appear like this in local.params:
 <pre><code>DATABASE_PASSWORD="Cq\&WU\<A\'\]p\?j"</code></pre>
 
-{% include 'ssl_config_multi' %}
+h2(#keep). Configure Keep storage
 
-h2(#create_a_compute_image). Configure Keep on S3 (AWS specific)
+The @multi_host/aws@ template uses S3 for storage.  Arvados also supports "filesystem storage":configure-fs-storage.html and "Azure blob storage":configure-azure-blob-storage.html .  Keep storage configuration can be found in @local_config_dir/pillars/arvados.sls@ in the section @arvados.cluster.Volumes@.
 
-Once you have that image created, Open @local_config_dir/pillars/arvados.sls@ and edit as follows:
+h3. Object storage in S3 (AWS Specific)
 
-1. In the @arvados.cluster.Volumes@ section, set @Region@ to the appropriate AWS region (e.g. 'us-east-1')
+Open @local_config_dir/pillars/arvados.sls@ and edit as follows:
+
+# In the @arvados.cluster.Volumes@ section, set @Region@ to the appropriate AWS region (e.g. 'us-east-1')
+# Set @IAMRole@ to the name of the `KeepstoreRole` generated by CloudFormation.  Just use the part after the '/' (not the arn:aws:iam.... stuff at the beginning).
+# Set @Bucket@ to the value of `KeepBucket1`
+
+{% include 'ssl_config_multi' %}
 
 h2(#create_a_compute_image). Create a compute image
 
@@ -181,6 +199,8 @@ Run this in ~/arvados-setup-xarv1:
 
 This will deploy all the nodes.  It will take a while and produce a lot of logging.  If it runs into an error, it will stop.
 
+h3(#test-install). Run diagnostics to confirm the cluster is working
+
 When everything has finished, you can run the diagnostics.
 
 Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.
@@ -193,7 +213,7 @@ You are an "external client" if you running the diagnostics from your workstatio
 ./installer.sh diagnostics (-internal-client|-external-client)
 </pre>
 
-h3. Diagnosing issues
+h3(#debugging). Debugging issues
 
 Most service logs go to @/var/log/syslog@
 
@@ -213,15 +233,21 @@ You can iterate on the config and maintain the cluster by making changes to @loc
 
 If you are debugging a configuration issue on a specific node, you can speed up the cycle a bit by deploying just one node:
 
-@installer.sh deploy keep0.xarv1.example.com@
+<pre>
+./installer.sh deploy keep0.xarv1.example.com
+</pre>
 
 However, once you have a final configuration, you should run a full deploy to ensure that the configuration has been synchronized on all the nodes.
 
-h3. Common problems and solutions
+h3(#common-problems). Common problems and solutions
+
+h4. Missing ENA support (AWS Specific)
+
+If the AMI wasn't built with ENA (extended networking) support and the instance type requires it, it'll fail to start.  You'll see an error in syslog on the node that runs @arvados-dispatch-cloud@.  The solution is to build a new AMI with --aws-ena-support true
 
-* (AWS Specific) If the AMI wasn't built with ENA (extended networking) support and the instance type requires it, it'll fail to start.  You'll see an error in syslog on the node that runs @arvados-dispatch-cloud@.  The solution is to build a new AMI with --aws-ena-support true
+h4. PG::UndefinedTable: ERROR:  relation \"api_clients\" does not exist
 
-* The arvados-api-server package sets up the database as a post-install script.  If the database host or password wasn't set correctly (or quoted correctly) at the time that package is installed, it won't be able to set up the database.
+The arvados-api-server package sets up the database as a post-install script.  If the database host or password wasn't set correctly (or quoted correctly) at the time that package is installed, it won't be able to set up the database.
 
 This will manifest as an error like this:
 
@@ -232,14 +258,14 @@ This will manifest as an error like this:
 If this happens, you need to
 
 1. correct the database information
-2. run "installer.sh deploy xngs2.rdcloud.bms.com" to update the configuration on the API/controller node
+2. run @./installer.sh deploy xarv1.example.com@ to update the configuration on the API/controller node
 3. On the API/controller server node, run this command to re-run the post-install script, which will set up the database:
 
 <pre>
 dpkg-reconfigure arvados-api-server
 </pre>
 
-4. Re-run 'installer.sh deploy' again to synchronize everything, and so that the install steps that need to contact the API server are run successfully.
+4. Re-run @./installer.sh deploy@ to synchronize everything, so that the install steps that need to contact the API server run successfully.
 
 {% include 'install_ca_cert' %}
 
@@ -257,98 +283,6 @@ Assuming you didn't change these values in the @local.params@ file, the initial
 * Password: 'password'
 * Email: 'admin@${CLUSTER}.${DOMAIN}'
 
-h2(#test_install). Test the installed cluster running a simple workflow
-
-As part of the installation, the @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@ directory in the @shell@ node. If you want to run it, just ssh to the node, then run:
-
-<notextile>
-<pre><code>cd /tmp/cluster_tests
-sudo /run-test.sh
-</code></pre>
-</notextile>
-
-It will create a test user (by default, the same one as the admin user), upload a small workflow and run it. If everything goes OK, the output should similar to this (some output was shortened for clarity):
-
-<notextile>
-<pre><code>Creating Arvados Standard Docker Images project
-Arvados project uuid is 'arva2-j7d0g-0prd8cjlk6kfl7y'
-{
- ...
- "uuid":"arva2-o0j2j-n4zu4cak5iifq2a",
- "owner_uuid":"arva2-tpzed-000000000000000",
- ...
-}
-Uploading arvados/jobs' docker image to the project
-2.1.1: Pulling from arvados/jobs
-8559a31e96f4: Pulling fs layer
-...
-Status: Downloaded newer image for arvados/jobs:2.1.1
-docker.io/arvados/jobs:2.1.1
-2020-11-23 21:43:39 arvados.arv_put[32678] INFO: Creating new cache file at /home/vagrant/.cache/arvados/arv-put/c59256eda1829281424c80f588c7cc4d
-2020-11-23 21:43:46 arvados.arv_put[32678] INFO: Collection saved as 'Docker image arvados jobs:2.1.1 sha256:0dd50'
-arva2-4zz18-1u5pvbld7cvxuy2
-Creating initial user ('admin')
-Setting up user ('admin')
-{
- "items":[
-  {
-   ...
-   "owner_uuid":"arva2-tpzed-000000000000000",
-   ...
-   "uuid":"arva2-o0j2j-1ownrdne0ok9iox"
-  },
-  {
-   ...
-   "owner_uuid":"arva2-tpzed-000000000000000",
-   ...
-   "uuid":"arva2-o0j2j-1zbeyhcwxc1tvb7"
-  },
-  {
-   ...
-   "email":"admin at arva2.arv.local",
-   ...
-   "owner_uuid":"arva2-tpzed-000000000000000",
-   ...
-   "username":"admin",
-   "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
-   ...
-  }
- ],
- "kind":"arvados#HashList"
-}
-Activating user 'admin'
-{
- ...
- "email":"admin at arva2.arv.local",
- ...
- "username":"admin",
- "uuid":"arva2-tpzed-3wrm93zmzpshrq2",
- ...
-}
-Running test CWL workflow
-INFO /usr/bin/cwl-runner 2.1.1, arvados-python-client 2.1.1, cwltool 3.0.20200807132242
-INFO Resolved 'hasher-workflow.cwl' to 'file:///tmp/cluster_tests/hasher-workflow.cwl'
-...
-INFO Using cluster arva2 (https://arva2.arv.local:8443/)
-INFO Upload local files: "test.txt"
-INFO Uploaded to ea34d971b71d5536b4f6b7d6c69dc7f6+50 (arva2-4zz18-c8uvwqdry4r8jao)
-INFO Using collection cache size 256 MiB
-INFO [container hasher-workflow.cwl] submitted container_request arva2-xvhdp-v1bkywd58gyocwm
-INFO [container hasher-workflow.cwl] arva2-xvhdp-v1bkywd58gyocwm is Final
-INFO Overall process status is success
-INFO Final output collection d6c69a88147dde9d52a418d50ef788df+123
-{
-    "hasher_out": {
-        "basename": "hasher3.md5sum.txt",
-        "class": "File",
-        "location": "keep:d6c69a88147dde9d52a418d50ef788df+123/hasher3.md5sum.txt",
-        "size": 95
-    }
-}
-INFO Final process status is success
-</code></pre>
-</notextile>
-
 h2(#post_install). After the installation
 
 As part of the operation of @installer.sh@, it automatically creates a @git@ repository with your configuration templates.  You should retain this repository but be aware that it contains sensitive information (passwords and tokens used by the Arvados services).
diff --git a/tools/salt-install/local.params.example.single_host_single_hostname b/tools/salt-install/local.params.example.single_host_single_hostname
index 0a9965426..1c72446a2 100644
--- a/tools/salt-install/local.params.example.single_host_single_hostname
+++ b/tools/salt-install/local.params.example.single_host_single_hostname
@@ -22,9 +22,11 @@ NODES=(
   [localhost]=database,api,controller,websocket,dispatcher,keepbalance,keepstore,keepproxy,keepweb,workbench,workbench2,webshell,shell
 )
 
-# Set this value when installing a cluster in a single host with a single
-# hostname to access all the instances. HOSTNAME_EXT should be set to the
-# external hostname for the instance.
+# HOSTNAME_EXT must be set to the address that users will use to
+# connect to the instance (e.g. what they will type into the URL bar
+# of the browser to get to workbench).  If you haven't given the
+# instance a working DNS name, you might need to use an IP address
+# here.
 HOSTNAME_EXT="hostname_ext_fixme_or_this_wont_work"
 
 # The internal IP address for the host.
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 4f9c209ef..74bc16493 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -840,17 +840,19 @@ if [ -d /etc/cloud/cloud.cfg.d ]; then
 fi
 
 # Leave a copy of the Arvados CA so the user can copy it where it's required
-if [ "$DEV_MODE" = "yes" ]; then
-  echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
-  # If running in a vagrant VM, also add default user to docker group
+if [ "${SSL_MODE}" = "self-signed" ]; then
+  echo "Copying the Arvados CA certificate '${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.crt' to the installer dir, so you can import it"
   if [ "x${VAGRANT}" = "xyes" ]; then
     cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
+  else
+    cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.crt
+  fi
+fi
 
+if [ "x${VAGRANT}" = "xyes" ]; then
+    # If running in a vagrant VM, also add default user to docker group
     echo "Adding the vagrant user to the docker group"
     usermod -a -G docker vagrant
-  else
-    cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
-  fi
 fi
 
 # Test that the installation finished correctly

commit 7a91ab4360a3578245e5ffb67d0d2ac14778ee01
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Thu Aug 18 18:04:23 2022 -0400

    19215: Incorporate many details into install doc
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/_includes/_download_installer.liquid b/doc/_includes/_download_installer.liquid
index 5dfcd089e..10909088b 100644
--- a/doc/_includes/_download_installer.liquid
+++ b/doc/_includes/_download_installer.liquid
@@ -6,8 +6,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 {% include 'branchname' %}
 
-This procedure will install all the main Arvados components to get you up and running in a single host.
-
 This is a package-based installation method; however, the installation script is currently distributed in source form via @git@. We recommend checking out the git tree on your local workstation, not directly on the target(s) where you want to install and run Arvados.
 
 <notextile>
@@ -17,4 +15,16 @@ cd arvados/tools/salt-install
 </code></pre>
 </notextile>
 
-The @provision.sh@ script will help you deploy Arvados by preparing your environment to be able to run the installer, then running it. The actual installer is located in the "arvados-formula git repository":https://git.arvados.org/arvados-formula.git/tree/refs/heads/{{ branchname }} and will be cloned during the running of the @provision.sh@ script.  The installer is built using "Saltstack":https://saltproject.io/ and @provision.sh@ performs the install using master-less mode.
+The @installer.sh@ and @provision.sh@ scripts will help you deploy Arvados by preparing your environment to be able to run the installer, then running it. The actual installer is located in the "arvados-formula git repository":https://git.arvados.org/arvados-formula.git/tree/refs/heads/{{ branchname }} and will be cloned during the running of the @provision.sh@ script.  The installer is built using "Saltstack":https://saltproject.io/ and @provision.sh@ performs the install using masterless mode.
+
+h2(#copy_config). Initialize the installer
+
+<notextile>
+<pre><code>./installer.sh initialize ~/setup-arvados-xarv1 {{local_params_src}} {{config_examples_src}}
+cd ~/setup-arvados-xarv1
+</code></pre>
+</notextile>
+
+This creates a git repository in @~/setup-arvados-xarv1@.  The @installer.sh@ script will record all the configuration changes you make, as well as using @git push@ to synchronize configuration edits across all the nodes.
+
+Important!  All further commands must be run in the @~/setup-arvados-xarv1@ directory.
diff --git a/doc/_includes/_multi_host_install_custom_certificates.liquid b/doc/_includes/_multi_host_install_custom_certificates.liquid
index b831aadcf..7672372af 100644
--- a/doc/_includes/_multi_host_install_custom_certificates.liquid
+++ b/doc/_includes/_multi_host_install_custom_certificates.liquid
@@ -8,16 +8,16 @@ Copy your certificates to the directory specified with the variable @CUSTOM_CERT
 
 The script expects cert/key files with these basenames (matching the role except for <i>keepweb</i>, which is split in both <i>download / collections</i>):
 
-* "controller"
-* "websocket"
-* "workbench"
-* "workbench2"
-* "webshell"
-* "download"         # Part of keepweb
-* "collections"      # Part of keepweb
-* "keepproxy"
+# @controller@
+# @websocket@        # note: corresponds to default domain @ws.${CLUSTER}.${DOMAIN}@
+# @keepproxy@        # note: corresponds to default domain @keep.${CLUSTER}.${DOMAIN}@
+# @download@         # Part of keepweb
+# @collections@      # Part of keepweb -- important note, this should be a wildcard for @*.collections.${CLUSTER}.${DOMAIN}@
+# @workbench@
+# @workbench2@
+# @webshell@
 
-E.g. for 'keepproxy', the script will look for
+For example, for the 'keepproxy' service the script will expect to find this certificate:
 
 <notextile>
 <pre><code>${CUSTOM_CERTS_DIR}/keepproxy.crt
@@ -26,3 +26,5 @@ ${CUSTOM_CERTS_DIR}/keepproxy.key
 </notextile>
 
 Make sure that all the FQDNs that you will use for the public-facing applications (API/controller, Workbench, Keepproxy/Keepweb) are reachable.
+
+It may be easier to create a single certificate wh
\ No newline at end of file
diff --git a/doc/_includes/_ssl_config_multi.liquid b/doc/_includes/_ssl_config_multi.liquid
index 1bcd1b64e..d001a5f22 100644
--- a/doc/_includes/_ssl_config_multi.liquid
+++ b/doc/_includes/_ssl_config_multi.liquid
@@ -8,9 +8,9 @@ h2(#certificates). Choose the SSL configuration (SSL_MODE)
 
 Arvados requires an SSL certificate to work correctly. This installer supports these options:
 
-* @self-signed@: let the installer create self-signed certificates
-* @lets-encrypt@: automatically obtain and install an SSL certificates for your hostnames
-* @bring-your-own@: supply your own certificates in the `certs` directory
+# @self-signed@: "let the installer create self-signed certificates":#self-signed
+# @lets-encrypt@: "automatically obtain and install an SSL certificates for your hostnames":#lets-encrypt
+# @bring-your-own@: "supply your own certificates in the @certs@ directory":#bring-your-own
 
 h3(#self-signed). Using self-signed certificates
 
@@ -21,7 +21,7 @@ To make the installer use self-signed certificates, change the configuration lik
 </code></pre>
 </notextile>
 
-When connecting to the Arvados web interface for the first time, you will need to accept the self-signed certificates as trusted to bypass the browser warnings. This can be a little tricky to do. Alternatively, you can also install the self-signed root certificate in your browser, see <a href="#ca_root_certificate">below</a>.
+Before connecting to the Arvados web interface for the first time, anyone accessing the instance will need to "install the self-signed root certificate in their browser":#ca_root_certificate .
 
 h3(#lets-encrypt). Using a Let's Encrypt certificate
 
diff --git a/doc/admin/maintenance-and-upgrading.html.textile.liquid b/doc/admin/maintenance-and-upgrading.html.textile.liquid
index 3cc80a356..480f5114e 100644
--- a/doc/admin/maintenance-and-upgrading.html.textile.liquid
+++ b/doc/admin/maintenance-and-upgrading.html.textile.liquid
@@ -42,9 +42,9 @@ Run @arvados-server config-check@ to make sure the configuration file has no err
 
 h3(#distribution). Distribute the configuration file
 
-We recommend to keep the @config.yml@ file in sync between all the Arvados system nodes, to avoid issues with services running on different versions of the configuration.
+It is very important to keep the @config.yml@ file in sync between all the Arvados system nodes, to avoid issues with services running on different versions of the configuration.
 
-Distribution of the configuration file can be done in many ways, e.g. scp, configuration management software, etc.
+We provide "installer.sh":../install/salt-multi-host-install.html#installation to distribute config changes.  You may also do your own orchestration e.g. @scp@, configuration management software, etc.
 
 h3(#restart). Restart the services affected by the change
 
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 5145d433b..1a70d46ef 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -12,15 +12,14 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 # "Introduction":#introduction
 # "Prerequisites and planning":#prerequisites
 # "Download the installer":#download
-# "Copy and customize the configuration files":#copy_config
+# "Initialize the installer":#copy_config
 # "Choose the SSL configuration":#certificates
 ## "Using a self-signed certificates":#self-signed
 ## "Using a Let's Encrypt certificates":#lets-encrypt
 ## "Bring your own certificates":#bring-your-own
 # "Create a compute image":#create_a_compute_image
 # "Further customization of the installation (modifying the salt pillars and states)":#further_customization
-# "Installation order":#installation_order
-# "Run the provision.sh script":#run_provision_script
+# "Begin installation":#installation
 # "Install the CA root certificate":#ca_root_certificate
 # "Initial user and login":#initial_user
 # "Test the installed cluster running a simple workflow":#test_install
@@ -28,156 +27,227 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#introduction). Introduction
 
-This multi host installer is an AWS specific example that is generally useful, but will likely need to be adapted for your environment. The installer is highly configurable.
+This multi host installer is the recommendend way to set up a production Arvados cluster.  These instructions include speciic details for installing on Amazon Web Services (AWS), which are marked as "AWS specific".  However with additional customization the installer can be used as a template for deployment on other cloud provider or HPC systems.
 
 h2(#prerequisites). Prerequisites and planning
 
-Prerequisites:
+h3. Cluster ID and base domain
 
-* git
-* a number of (virtual) machines for your Arvados cluster with at least 2 cores and 8 GiB of RAM, running a "supported Arvados distribution":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux
-* a number of DNS hostnames that resolve to the IP addresses of your Arvados hosts
-* ports 443 need to be reachable from your client (configurable in @local.params@, see below)
-* port 80 needs to be reachable from everywhere on the internet (only when using "Let's Encrypt":#lets-encrypt without Route53 integration)
-* SSL certificatse matching the hostnames in use (only when using "bring your own certificates":#bring-your-own)
+Choose a 5-character cluster identifier that will represent the cluster.  Here are "guidelines on choosing a cluster identifier":../architecture/federation.html#cluster_id .  Only lowercase letters and digits 0-9 are allowed.  Examples will use @xarv1@ or @${CLUSTER}@; you should substitute the cluster id you have selected.
 
-Planning:
+Determine the base domain for the cluster.  This will be referred to as @${DOMAIN}@.
 
-We suggest distributing the Arvados components in the following way, creating at least 6 hosts:
+For example, if CLUSTER is "xarv1" and DOMAIN is "example.com", then "controller.${CLUSTER}.${DOMAIN}" means "controller.xarv1.example.com".
 
-# Database server:
+h3. Virtual Private Cloud (AWS specific)
+
+We recommend setting Arvados up in a "Virtual Private Cloud (VPC)":https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html
+
+When you do so, you need to configure a couple of additional things:
+
+# "Create a subnet for the compute nodes":https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html
+# You should set up a "security group which allows SSH access (port 22)":https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html
+# Make sure to add a "VPC S3 endpoint":https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html
+
+h3. S3 Bucket (AWS specific)
+
+We recommend "creating an S3 bucket":https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html for data storage named @${CLUSTER}-nyw5e-000000000000000-volume@
+
+Then create an IAM role called @${CLUSTER}-keepstore-00-iam-role@ which has "permission to read and write the bucket":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html
+
+h3. Machines
+
+You will need to allocate (virtual) machines for the fixed infrastructure of the Arvados cluster.  These machines should have at least 2 cores and 8 GiB of RAM, running a "supported Arvados distribution":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux
+
+Allocate these as appropriate for your site.  On AWS you may choose to do it manually with the AWS console, or using a DevOps tool such as CloudFormation or Terraform.
+
+The installer will set up the Arvados services on your machines.  Here is the default assignment of services to machines:
+
+# API node
 ## postgresql server
-# API node:
 ## arvados api server
-## arvados controller
-## arvados websocket
+## arvados controller  (recommended hostname @controller.${CLUSTER}.${DOMAIN}@)
+## arvados websocket   (recommended hostname @ws.${CLUSTER}.${DOMAIN}@)
 ## arvados cloud dispatcher
 ## arvados keepbalance
-# WORKBENCH node:
-## arvados workbench
-## arvados workbench2
-## arvados webshell
-# KEEPPROXY node:
-## arvados keepproxy
-## arvados keepweb
 # KEEPSTORE nodes (at least 2)
-## arvados keepstore
-# SHELL node (optional):
-## arvados shell
-
-If your infrastructure differs from the setup proposed above (ie, using RDS or an existing DB server), remember that you will need to edit the configuration files for the scripts so they work with your infrastructure.
+## arvados keepstore   (recommended hostnames @keep0.${CLUSTER}.${DOMAIN}@ and @keep1.${CLUSTER}.${DOMAIN}@)
+# KEEPPROXY node
+## arvados keepproxy   (recommended hostname @keep.${CLUSTER}.${DOMAIN}@)
+## arvados keepweb     (recommended hostnames @download.${CLUSTER}.${DOMAIN}@ and @*.collections.${CLUSTER}.${DOMAIN}@)
+# WORKBENCH node
+## arvados workbench   (recommended hostname @workbench.${CLUSTER}.${DOMAIN}@)
+## arvados workbench2  (recommended hostname @workbench2.${CLUSTER}.${DOMAIN}@)
+## arvados webshell    (recommended hostname @webshell.${CLUSTER}.${DOMAIN}@)
+# SHELL node  (optional)
+## arvados shell       (recommended hostname @shell.${CLUSTER}.${DOMAIN}@)
+
+Additional prerequisites when preparing machines to run the installer:
+
+# root or passwordless sudo access
+# from the account where you are performing the install, passwordless @ssh@ to each machine (that is, the client's public key added to @~/.ssh/authorized_keys@ on each node); see the verification sketch after this list
+# @git@ installed on each machine
+# port 443 reachable by clients
+# DNS hostnames for each service
+## @controller.${CLUSTER}.${DOMAIN}@
+## @ws.${CLUSTER}.${DOMAIN}@
+## @keep0.${CLUSTER}.${DOMAIN}@
+## @keep1.${CLUSTER}.${DOMAIN}@
+## @keep.${CLUSTER}.${DOMAIN}@
+## @download.${CLUSTER}.${DOMAIN}@
+## @*.collections.${CLUSTER}.${DOMAIN}@  -- important: this must be a wildcard DNS record that resolves to the keepweb service
+## @workbench.${CLUSTER}.${DOMAIN}@
+## @workbench2.${CLUSTER}.${DOMAIN}@
+## @webshell.${CLUSTER}.${DOMAIN}@
+## @shell.${CLUSTER}.${DOMAIN}@
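+
+A quick way to check the DNS and access prerequisites from the install account (a sketch, assuming @bash@ and the @host@ utility, with the example names used in this guide):
+
+<pre><code>CLUSTER=xarv1 DOMAIN=example.com
+for h in controller ws keep0 keep1 keep download workbench workbench2 webshell shell; do
+  host "$h.$CLUSTER.$DOMAIN" >/dev/null || echo "no DNS record for $h.$CLUSTER.$DOMAIN"
+done
+# any name under collections should resolve via the wildcard record
+host "test.collections.$CLUSTER.$DOMAIN" >/dev/null || echo "wildcard record missing"
+# passwordless ssh and sudo should succeed without prompting
+ssh "controller.$CLUSTER.$DOMAIN" sudo true && echo "ssh and sudo ok"
+</code></pre>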
+
+(AWS specific) The machine that runs the arvados cloud dispatcher will need an "IAM role that allows it to create EC2 instances (see here for details)":https://doc.arvados.org/v2.4/install/crunch2-cloud/install-dispatch-cloud.html .
+
+If your infrastructure differs from the setup proposed above (i.e. different hostnames, or using AWS RDS or an existing DB server), you can still use the installer, but additional customization will be necessary.
 
 h2(#download). Download the installer
 
+{% assign local_params_src = 'multiple_hosts' %}
+{% assign config_examples_src = 'multi_host/aws' %}
 {% include 'download_installer' %}
 
-h2(#copy_config). Copy and customize the configuration files
-
-<notextile>
-<pre><code>cp local.params.example.multiple_hosts local.params
-cp -r config_examples/multi_host/aws local_config_dir
+h2. Edit @local.params@
+
+This file can be found in the directory where you chose to initialize the installer files (@~/setup-arvados-xarv1@ in these examples).
+
+# Set @CLUSTER@ to the 5-character cluster identifier (e.g. "xarv1")
+# Set @DOMAIN@ to the base DNS domain of the environment, e.g. "example.com"
+# Edit the internal IP settings.  Since some services share hosts, several entries will use the same IP address.
+# Edit @CLUSTER_INT_CIDR@; this should be the CIDR of the private network that Arvados is running on, e.g. the VPC.
+	AWS specific: in the AWS console, go to the VPC service; the table view of VPCs has an "IPv4 CIDR" column that gives the CIDR for each VPC.
+# Set @INITIAL_USER_EMAIL@ to your email address, as you will be the first admin user of the system.
+# Set each @KEY@ / @TOKEN@ to a random string
+	Here's an easy way to create five random tokens:
+<pre><code>for i in 1 2 3 4 5; do
+  tr -dc A-Za-z0-9 </dev/urandom | head -c 32 ; echo ''
+done
 </code></pre>
-</notextile>
-
-Edit the variables in the <i>local.params</i> file. Pay attention to the <notextile><b>*_INT_IP, *_TOKEN</b> and <b>*_KEY</b></notextile> variables. The *SSL_MODE* variable is discussed in the next section.
+# Set @DATABASE_PASSWORD@ to a random string
+   Important! If this contains any non-alphanumeric characters, in particular ampersand ('&'), it is necessary to add backslash quoting (see the sketch after this list).
+   For example, if the password is @Cq&WU<A']p?j@, then with the special characters backslash-quoted it should appear like this in @local.params@:
+<pre><code>DATABASE_PASSWORD="Cq\&WU\<A\'\]p\?j"</code></pre>
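+
+If you are unsure how to quote a particular password, @bash@'s @printf %q@ produces a backslash-quoted form (a sketch, shown with the example password above; substitute your own):
+
+<pre><code>printf '%q\n' "Cq&WU<A']p?j"
+# prints Cq\&WU\<A\'\]p\?j
+</code></pre>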
 
 {% include 'ssl_config_multi' %}
 
+h2(#configure_s3). Configure Keep on S3 (AWS specific)
+
+Open @local_config_dir/pillars/arvados.sls@ and edit as follows:
+
+# In the @arvados.cluster.Volumes@ section, set @Region@ to the appropriate AWS region (e.g. @us-east-1@); a sketch of the relevant fragment follows.
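+
+For orientation, the relevant fragment of the pillar might look roughly like this (a hedged sketch: the volume UUID, bucket name, IAM role and region are the examples recommended earlier; check the exact key layout against your own @arvados.sls@):
+
+<pre><code>arvados:
+  cluster:
+    Volumes:
+      xarv1-nyw5e-000000000000000:
+        Driver: S3
+        DriverParameters:
+          Bucket: xarv1-nyw5e-000000000000000-volume
+          IAMRole: xarv1-keepstore-00-iam-role
+          Region: us-east-1
+</code></pre>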
+
 h2(#create_a_compute_image). Create a compute image
 
 {% include 'branchname' %}
 
-In a multi-host installation, containers are dispatched in docker daemons running in the <i>compute instances</i>, which need some special setup. We provide a "compute image builder script":https://github.com/arvados/arvados/tree/{{ branchname }}/tools/compute-images that you can use to build a template image following "these instructions":https://doc.arvados.org/install/crunch2-cloud/install-compute-node.html. Once you have that image created, you will need to update the <i>pillars/arvados.sls</i> file with the AMI ID and the private ssh key for the dispatcher.
+On cloud installations, containers are dispatched to Docker daemons running on the <i>compute instances</i>, which need some special setup.  Follow "the instructions to build a cloud compute node image":https://doc.arvados.org/install/crunch2-cloud/install-compute-node.html using the "compute image builder script":https://github.com/arvados/arvados/tree/{{ branchname }}/tools/compute-images .
 
-h2(#further_customization). Further customization of the installation (modifying the salt pillars and states)
+Once you have that image created, open @local_config_dir/pillars/arvados.sls@ and edit as follows (AWS-specific settings are described here; configuration for Azure is similar):
 
-You will need further customization to suit your environment, which can be done editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that describes your environment.
+# In the @arvados.cluster.Containers.CloudVMs@ section:
+## Set @ImageID@ to the AMI output from Packer
+## Set @Region@ to the appropriate AWS region
+## Set @AdminUsername@ to the admin user account on the image
+## Set the @SecurityGroupIDs@ list to the VPC security group which you set up to allow SSH connections to these nodes
+## Set @SubnetID@ to the ID of the compute node subnet you created in your VPC
+# Update @arvados.cluster.Containers.DispatchPrivateKey@ and paste the contents of the @~/.ssh/id_dispatcher@ file you generated in an earlier step.
+# Update @arvados.cluster.InstanceTypes@ as necessary (an illustrative entry is sketched below).  If t3 and m5/c5 node types are not available, replace them with t2 and m4/c4.  You'll need to double-check the values for Price and IncludedScratch/AddedScratch for each type that is changed.
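+
+For reference, an entry in @arvados.cluster.InstanceTypes@ has roughly this shape (illustrative values only; verify VCPUs, RAM, scratch sizes and Price against the actual instance type offering in your region):
+
+<pre><code>InstanceTypes:
+  t3small:
+    ProviderType: t3.small
+    VCPUs: 2
+    RAM: 2GiB
+    IncludedScratch: 20GB
+    AddedScratch: 0
+    Price: 0.0208
+</code></pre>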
 
-Any extra <i>state</i> file you add under <i>local_config_dir/states</i> will be added to the salt run and applied to the hosts.
+h2(#further_customization). Further customization of the installation
 
-h2(#installation_order). Installation order
+If you are installing on AWS and following the naming conventions recommended in this guide, then likely no further configuration is necessary and you can begin installation.
 
-A few Arvados nodes need to be installed in certain order. The required order is
+If your infrastructure differs from the setup proposed above (i.e. using AWS RDS or an existing DB server), you can still use the installer, but additional customization will be necessary.
 
-* Database
-* API server
-* The other nodes can be installed in any order after the two above
+This is done by editing the Saltstack pillars and states files found in @local_config_dir@.  In particular, @local_config_dir/pillars/arvados.sls@ has the template used to produce the Arvados configuration file that is distributed to all the nodes.
 
-h2(#run_provision_script). Run the provision.sh script
+Any extra salt <i>state</i> file you add under @local_config_dir/states@ will be added to the salt run and applied to the hosts.
 
-When you finished customizing the configuration, you are ready to copy the files to the hosts and run the @provision.sh@ script. The script allows you to specify the <i>role/s</i> a node will have and it will install only the Arvados components required for such role. The general format of the command is:
+h2(#installation). Begin installation
 
-<notextile>
-<pre><code>scp -r provision.sh local* user@host:
-ssh user@host sudo ./provision.sh --roles comma,separated,list,of,roles,to,apply
-</code></pre>
-</notextile>
+At this point, you are ready to run the installer script in deploy mode, which will perform the complete Arvados installation.
 
-and wait for it to finish.
+Run this in @~/setup-arvados-xarv1@:
 
-If everything goes OK, you'll get some final lines stating something like:
+<pre>
+./installer.sh deploy
+</pre>
 
-<notextile>
-<pre><code>arvados: Succeeded: 109 (changed=9)
-arvados: Failed:      0
-</code></pre>
-</notextile>
+This will deploy all the nodes.  It will take a while and produce a lot of logging.  If it runs into an error, it will stop.
 
-The distribution of role as described above can be applied running these commands:
+When everything has finished, you can run the diagnostics.
 
-h4. Database
-<notextile>
-<pre><code>scp -r provision.sh local* user@host:
-ssh user@host sudo ./provision.sh --config local.params --roles database
-</code></pre>
-</notextile>
+Depending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.
 
-h4. API
-<notextile>
-<pre><code>scp -r provision.sh local* user@host:
-ssh user@host sudo ./provision.sh --config local.params --roles api,controller,websocket,dispatcher,keepbalance
-</code></pre>
-</notextile>
+You are probably an "internal client" if you are running the diagnostics from one of the Arvados machines inside the VPC.
 
-h4. Keepstore(s)
-<notextile>
-<pre><code>scp -r provision.sh local* user@host:
-ssh user@host sudo ./provision.sh --config local.params --roles keepstore
-</code></pre>
-</notextile>
+You are an "external client" if you running the diagnostics from your workstation outside of the VPC.
 
-h4. Workbench
-<notextile>
-<pre><code>scp -r provision.sh local* user@host:
-ssh user@host sudo ./provision.sh --config local.params --roles workbench,workbench2,webshell
-</code></pre>
-</notextile>
+<pre>
+./installer.sh diagnostics (-internal-client|-external-client)
+</pre>
 
-h4. Keepproxy / Keepweb
-<notextile>
-<pre><code>scp -r provision.sh local* user@host:
-ssh user@host sudo ./provision.sh --config local.params --roles keepproxy,keepweb
-</code></pre>
-</notextile>
+h3. Diagnosing issues
 
-h4. Shell (here we copy the CLI test workflow too)
-<notextile>
-<pre><code>scp -r provision.sh local* tests user@host:
-ssh user@host sudo ./provision.sh --config local.params --roles shell
-</code></pre>
-</notextile>
+Most service logs go to @/var/log/syslog@.
 
-{% include 'install_ca_cert' %}
+The logs for the Rails API server and for Workbench can be found in
 
-h2(#initial_user). Initial user and login
+@/var/www/arvados-api/current/log/production.log@
+and
+@/var/www/arvados-workbench/current/log/production.log@
+
+on the appropriate instances.
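+
+One way to watch these logs while reproducing a problem (a sketch; run on the affected node):
+
+<pre><code>sudo tail -f /var/log/syslog | grep -i arvados
+# on the API/controller node, follow the Rails API server log:
+sudo tail -f /var/www/arvados-api/current/log/production.log
+</code></pre>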
+
+Workbench2 is a client-side JavaScript application.  If it is having trouble loading, check the browser's developer console.
+
+h3(#iterating). Iterating on config changes
+
+You can iterate on the config and maintain the cluster by making changes to @local.params@ and @local_config_dir@ and running @installer.sh deploy@ again.
 
-At this point you should be able to log into the Arvados cluster. The initial URL will be:
+If you are debugging a configuration issue on a specific node, you can speed up the cycle a bit by deploying just one node:
 
-* https://workbench.arva2.arv.local
+@installer.sh deploy keep0.xarv1.example.com@
 
-or, in general, the url format will be:
+However, once you have a final configuration, you should run a full deploy to ensure that the configuration has been synchronized on all the nodes.
 
-* https://workbench.@<cluster>.<domain>@
+h3. Common problems and solutions
+
+* (AWS specific) If the AMI wasn't built with ENA (extended networking) support and the instance type requires it, it'll fail to start.  You'll see an error in syslog on the node that runs @arvados-dispatch-cloud@.  The solution is to build a new AMI with @--aws-ena-support true@.
+
+* The @arvados-api-server@ package sets up the database as a post-install script.  If the database host or password wasn't set correctly (or quoted correctly) when the package was installed, it won't be able to set up the database.
+
+This will manifest as an error like this:
+
+<pre>
+#<ActiveRecord::StatementInvalid: PG::UndefinedTable: ERROR:  relation \"api_clients\" does not exist
+</pre>
+
+If this happens, you need to:
+
+1. Correct the database information.
+2. Run @./installer.sh deploy controller.xarv1.example.com@ (substituting your own API/controller hostname) to update the configuration on that node.
+3. On the API/controller node, run this command to re-run the post-install script, which will set up the database:
+
+<pre>
+dpkg-reconfigure arvados-api-server
+</pre>
+
+4. Run @./installer.sh deploy@ again to synchronize everything, and so that the install steps that need to contact the API server can complete successfully.
+
+{% include 'install_ca_cert' %}
+
+h2(#initial_user). Initial user and login
+
+At this point you should be able to log into the Arvados cluster. The initial URL will be:
+
+* https://workbench.${CLUSTER}.${DOMAIN}
 
 By default, the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.
 
@@ -185,11 +255,11 @@ Assuming you didn't change these values in the @local.params@ file, the initial
 
 * User: 'admin'
 * Password: 'password'
-* Email: 'admin at arva2.arv.local'
+* Email: 'admin@${CLUSTER}.${DOMAIN}'
 
 h2(#test_install). Test the installed cluster running a simple workflow
 
-If you followed the instructions above, the @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@ directory in the @shell@ node. If you want to run it, just ssh to the node, change to that directory and run:
+As part of the installation, the @provision.sh@ script saves a simple example test workflow in the @/tmp/cluster_tests@ directory on the @shell@ node. If you want to run it, ssh to the node, then run:
 
 <notextile>
 <pre><code>cd /tmp/cluster_tests
@@ -281,6 +351,10 @@ INFO Final process status is success
 
 h2(#post_install). After the installation
 
-Once the installation is complete, it is recommended to keep a copy of your local configuration files. Committing them to version control is a good idea.
+As part of its operation, @installer.sh@ automatically creates a @git@ repository with your configuration templates.  You should retain this repository, but be aware that it contains sensitive information (passwords and tokens used by the Arvados services).
+
+As described in "Iterating on config changes":#iterating, you may use @installer.sh deploy@ to re-run Salt and deploy configuration changes and upgrades.  However, be aware that the configuration templates created for you by @installer.sh@ are a snapshot which is not automatically kept up to date.
+
+When deploying upgrades, consult the "Arvados upgrade notes":{{site.baseurl}}/admin/upgrading.html to see if changes need to be made to the configuration file template in @local_config_dir/pillars/arvados.sls@.
 
-Re-running the Salt-based installer is not recommended for maintaining and upgrading Arvados, please see "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.
+See "Maintenance and upgrading":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index ade1ad467..5e7ae7ca1 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -20,7 +20,7 @@ DEPLOY_USER=root
 # installer.sh will log in to each of these nodes and then provision
 # it for the specified roles.
 NODES=(
-  [controller.${CLUSTER}.${DOMAIN}]=api,controller,websocket,dispatcher,keepbalance
+  [controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
   [keep0.${CLUSTER}.${DOMAIN}]=keepstore
   [keep1.${CLUSTER}.${DOMAIN}]=keepstore
   [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
@@ -67,12 +67,12 @@ INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_w
 INITIAL_USER_PASSWORD="password"
 
 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
-BLOB_SIGNING_KEY=blobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=managementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=systemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=anonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=workbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=please_set_this_to_some_secure_value
+BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
+MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
+SYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters
+ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters
+WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymushaveatleast32characters
+DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value
 
 # SSL CERTIFICATES
 # Arvados requires SSL certificates to work correctly. This installer supports these options:
diff --git a/tools/salt-install/local.params.example.single_host_multiple_hostnames b/tools/salt-install/local.params.example.single_host_multiple_hostnames
index f072fedb4..de2fb4e04 100644
--- a/tools/salt-install/local.params.example.single_host_multiple_hostnames
+++ b/tools/salt-install/local.params.example.single_host_multiple_hostnames
@@ -19,7 +19,7 @@ DEPLOY_USER=root
 # installer.sh will log in to each of these nodes and then provision
 # it for the specified roles.
 NODES=(
-  [localhost]=api,controller,websocket,dispatcher,keepbalance,keepstore,keepproxy,keepweb,workbench,workbench2,webshell,shell
+  [localhost]=database,api,controller,websocket,dispatcher,keepbalance,keepstore,keepproxy,keepweb,workbench,workbench2,webshell,shell
 )
 
 # External ports used by the Arvados services
@@ -38,12 +38,12 @@ INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_w
 INITIAL_USER_PASSWORD="password"
 
 # YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
-BLOB_SIGNING_KEY=blobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=managementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=systemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=anonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=workbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=please_set_this_to_some_secure_value
+BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
+MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
+SYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters
+ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters
+WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymushaveatleast32characters
+DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value
 
 # SSL CERTIFICATES
 # Arvados requires SSL certificates to work correctly. This installer supports these options:
diff --git a/tools/salt-install/local.params.example.single_host_single_hostname b/tools/salt-install/local.params.example.single_host_single_hostname
index fdb10cf63..0a9965426 100644
--- a/tools/salt-install/local.params.example.single_host_single_hostname
+++ b/tools/salt-install/local.params.example.single_host_single_hostname
@@ -19,7 +19,7 @@ DEPLOY_USER=root
 # installer.sh will log in to each of these nodes and then provision
 # it for the specified roles.
 NODES=(
-  [localhost]=api,controller,websocket,dispatcher,keepbalance,keepstore,keepproxy,keepweb,workbench,workbench2,webshell,shell
+  [localhost]=database,api,controller,websocket,dispatcher,keepbalance,keepstore,keepproxy,keepweb,workbench,workbench2,webshell,shell
 )
 
 # Set this value when installing a cluster in a single host with a single
@@ -46,12 +46,12 @@ INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_w
 INITIAL_USER_PASSWORD="password"
 
 # Populate these values with random strings
-BLOB_SIGNING_KEY=blobsigningkeymushaveatleast32characters
-MANAGEMENT_TOKEN=managementtokenmushaveatleast32characters
-SYSTEM_ROOT_TOKEN=systemroottokenmushaveatleast32characters
-ANONYMOUS_USER_TOKEN=anonymoususertokenmushaveatleast32characters
-WORKBENCH_SECRET_KEY=workbenchsecretkeymushaveatleast32characters
-DATABASE_PASSWORD=please_set_this_to_some_secure_value
+BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
+MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
+SYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters
+ANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters
+WORKBENCH_SECRET_KEY=fixmeworkbenchsecretkeymushaveatleast32characters
+DATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value
 
 # SSL CERTIFICATES
 # Arvados requires SSL certificates to work correctly. This installer supports these options:

-----------------------------------------------------------------------


hooks/post-receive
-- 



