[ARVADOS] created: 2.3.2-2-gd1828fefa

Git user git at public.arvados.org
Thu Jan 6 17:59:02 UTC 2022


        at  d1828fefa997bcd6257c39ca6f6f605a5a32707a (commit)


commit d1828fefa997bcd6257c39ca6f6f605a5a32707a
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Jan 6 12:02:37 2022 -0500

    Bugfixes: doc: show the correct branch names for all the Salt install
    methods.
    
    No issue #
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/_includes/_branchname.liquid b/doc/_includes/_branchname.liquid
index 44707e3f9..7180e09ce 100644
--- a/doc/_includes/_branchname.liquid
+++ b/doc/_includes/_branchname.liquid
@@ -5,7 +5,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% if site.current_version and site.current_version != 'main' %}
-{% assign branchname = site.current_version | slice: 0, 3 | append: '-dev' %}
+{% assign branchname = site.current_version | slice: 1, 3 | append: '-dev' %}
 {% else %}
 {% assign branchname = 'main' %}
 {% endif %}
diff --git a/doc/install/salt-vagrant.html.textile.liquid b/doc/install/salt-vagrant.html.textile.liquid
index 8ba4b324e..19a2cd510 100644
--- a/doc/install/salt-vagrant.html.textile.liquid
+++ b/doc/install/salt-vagrant.html.textile.liquid
@@ -18,7 +18,9 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#vagrant). Vagrant
 
-This is a package-based installation method. The Salt scripts are available from the "tools/salt-install":https://github.com/arvados/arvados/tree/main/tools/salt-install directory in the Arvados git repository.
+{% include 'branchname' %}
+
+This is a package-based installation method. Start by cloning the @{{ branchname }}@ branch from "https://git.arvados.org/arvados.git":https://git.arvados.org/arvados.git . The Salt scripts are available in the @tools/salt-install@ directory.
 
 A @Vagrantfile@ is provided to install Arvados in a virtual machine on your computer using "Vagrant":https://www.vagrantup.com/.
 

commit 5a1ba930d6c3c61dfe2dbce127954c57f78bff1f
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Fri Oct 29 11:17:36 2021 -0400

    Merge branch '18273-java-timeout' refs #18273
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/sdk/java-v2/index.html.textile.liquid b/doc/sdk/java-v2/index.html.textile.liquid
index b3336d51d..ad9f0e1a9 100644
--- a/doc/sdk/java-v2/index.html.textile.liquid
+++ b/doc/sdk/java-v2/index.html.textile.liquid
@@ -32,7 +32,7 @@ application {
 }
 
 dependencies {
-    api 'org.arvados:arvados-java-sdk:0.1.0'
+    api 'org.arvados:arvados-java-sdk:0.1.1'
 }
 </pre>
 
@@ -140,7 +140,7 @@ Dependencies:
 $ <code class="userinput">git clone https://github.com/arvados/arvados.git</code>
 $ <code class="userinput">cd arvados/sdk/java-v2</code>
 $ <code class="userinput">gradle test</code>
-$ <code class="userinput">gradle jar</code>
+$ <code class="userinput">gradle jar -Pversion=0.1.1</code>
 </pre>
-This will build the SDK and run all unit tests, then generate an Arvados Java sdk jar file in build/libs/arvados-java-2.0.0.jar
+This will build the SDK and run all unit tests, then generate an Arvados Java sdk jar file in build/libs/arvados-java-0.1.1.jar
 </notextile>
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
index a8d1a08cb..51f2f4a81 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java
@@ -23,6 +23,7 @@ import java.io.UnsupportedEncodingException;
 import java.net.URLDecoder;
 import java.nio.charset.StandardCharsets;
 import java.util.Objects;
+import java.util.concurrent.TimeUnit;
 
 abstract class BaseApiClient {
 
@@ -34,7 +35,12 @@ abstract class BaseApiClient {
 
     BaseApiClient(ConfigProvider config) {
         this.config = config;
-        this.client = OkHttpClientFactory.INSTANCE.create(config.isApiHostInsecure());
+        this.client = OkHttpClientFactory.INSTANCE.create(config.isApiHostInsecure())
+	    .newBuilder()
+	    .connectTimeout(config.getConnectTimeout(), TimeUnit.MILLISECONDS)
+	    .readTimeout(config.getReadTimeout(), TimeUnit.MILLISECONDS)
+	    .writeTimeout(config.getWriteTimeout(), TimeUnit.MILLISECONDS)
+	    .build();
     }
 
     Request.Builder getRequestBuilder() {
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java b/sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java
index c9a410931..116a46b3e 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/config/ConfigProvider.java
@@ -26,6 +26,11 @@ public interface ConfigProvider {
 
     String getApiProtocol();
 
+    int getConnectTimeout();
+
+    int getReadTimeout();
+
+    int getWriteTimeout();
 
     //FILE UPLOAD
     int getFileSplitSize();
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java b/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java
index 17e06966f..d592b23ac 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java
@@ -22,8 +22,35 @@ public class ExternalConfigProvider implements ConfigProvider {
     private File fileSplitDirectory;
     private int numberOfCopies;
     private int numberOfRetries;
+    private int connectTimeout;
+    private int readTimeout;
+    private int writeTimeout;
+
+    ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort,
+			   String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory,
+			   int numberOfCopies, int numberOfRetries)
+    {
+        this.apiHostInsecure = apiHostInsecure;
+        this.keepWebHost = keepWebHost;
+        this.keepWebPort = keepWebPort;
+        this.apiHost = apiHost;
+        this.apiPort = apiPort;
+        this.apiToken = apiToken;
+        this.apiProtocol = apiProtocol;
+        this.fileSplitSize = fileSplitSize;
+        this.fileSplitDirectory = fileSplitDirectory;
+        this.numberOfCopies = numberOfCopies;
+        this.numberOfRetries = numberOfRetries;
+	this.connectTimeout = 60000;
+	this.readTimeout = 60000;
+	this.writeTimeout = 60000;
+    }
 
-    ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort, String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory, int numberOfCopies, int numberOfRetries) {
+    ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort,
+			   String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory,
+			   int numberOfCopies, int numberOfRetries,
+			   int connectTimeout, int readTimeout, int writeTimeout)
+    {
         this.apiHostInsecure = apiHostInsecure;
         this.keepWebHost = keepWebHost;
         this.keepWebPort = keepWebPort;
@@ -35,6 +62,9 @@ public class ExternalConfigProvider implements ConfigProvider {
         this.fileSplitDirectory = fileSplitDirectory;
         this.numberOfCopies = numberOfCopies;
         this.numberOfRetries = numberOfRetries;
+	this.connectTimeout = connectTimeout;
+	this.readTimeout = readTimeout;
+	this.writeTimeout = writeTimeout;
     }
 
     public static ExternalConfigProviderBuilder builder() {
@@ -102,6 +132,18 @@ public class ExternalConfigProvider implements ConfigProvider {
         return this.numberOfRetries;
     }
 
+    public int getConnectTimeout() {
+        return this.connectTimeout;
+    }
+
+    public int getReadTimeout() {
+        return this.readTimeout;
+    }
+
+    public int getWriteTimeout() {
+        return this.writeTimeout;
+    }
+
     public static class ExternalConfigProviderBuilder {
         private boolean apiHostInsecure;
         private String keepWebHost;
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java b/sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java
index 589c3346b..99c1af810 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java
@@ -104,4 +104,19 @@ public class FileConfigProvider implements ConfigProvider {
     public String getIntegrationTestProjectUuid() {
         return this.getString("integration-tests.project-uuid");
     }
+
+    @Override
+    public int getConnectTimeout() {
+        return this.getInt("connectTimeout");
+    }
+
+    @Override
+    public int getReadTimeout() {
+        return this.getInt("readTimeout");
+    }
+
+    @Override
+    public int getWriteTimeout() {
+        return this.getInt("writeTimeout");
+    }
 }
diff --git a/sdk/java-v2/src/main/resources/reference.conf b/sdk/java-v2/src/main/resources/reference.conf
index 3ff2bb0a9..8933e2bc6 100644
--- a/sdk/java-v2/src/main/resources/reference.conf
+++ b/sdk/java-v2/src/main/resources/reference.conf
@@ -1,3 +1,7 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+#
 # Arvados client default configuration
 #
 # Remarks:
@@ -20,4 +24,7 @@ arvados {
     temp-dir = /tmp/file-split
     copies = 2
     retries = 0
+    connectTimeout = 60000
+    readTimeout = 60000
+    writeTimeout = 60000
 }
diff --git a/sdk/java-v2/test-in-docker.sh b/sdk/java-v2/test-in-docker.sh
index c685005c0..7af3d32c4 100755
--- a/sdk/java-v2/test-in-docker.sh
+++ b/sdk/java-v2/test-in-docker.sh
@@ -6,12 +6,11 @@
 #
 set -e
 
-format_last_commit_here() {
-    local format="$1"; shift
-    TZ=UTC git log -n1 --first-parent "--format=format:$format" .
+commit_at_dir() {
+    git log -n1 --format=%H .
 }
 
-version_from_git() {
+build_version() {
     # Output the version being built, or if we're building a
     # dev/prerelease, output a version number based on the git log for
     # the current working directory.
@@ -20,29 +19,7 @@ version_from_git() {
         return
     fi
 
-    local git_ts git_hash prefix
-    if [[ -n "$1" ]] ; then
-        prefix="$1"
-    else
-        prefix="0.1"
-    fi
-
-    declare $(format_last_commit_here "git_ts=%ct git_hash=%h")
-    ARVADOS_BUILDING_VERSION="$(git describe --abbrev=0).$(date -ud "@$git_ts" +%Y%m%d%H%M%S)"
-    echo "$ARVADOS_BUILDING_VERSION"
-} 
-
-nohash_version_from_git() {
-    version_from_git $1 | cut -d. -f1-3
+    $WORKSPACE/build/version-at-commit.sh $(commit_at_dir)
 }
 
-timestamp_from_git() {
-    format_last_commit_here "%ct"
-}
-if [[ -n "$1" ]]; then
-    build_version="$1"
-else
-    build_version="$(version_from_git)"
-fi
-#UID=$(id -u) # UID is read-only on many systems
-exec docker run --rm --user $UID -v $PWD:$PWD -w $PWD gradle /bin/sh -c 'gradle clean && gradle test && gradle jar install '"$gradle_upload"
\ No newline at end of file
+exec docker run --rm --user $UID -v $PWD:$PWD -w $PWD gradle:5.3.1 /bin/sh -c 'gradle clean && gradle test && gradle jar install '"-Pversion=$(build_version) $gradle_upload"

commit e8a8cadb6035fa8c03589b9ab5ac1a382407c6a1
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Thu Dec 9 11:27:08 2021 -0500

    Update package versions for arvbox & provision.sh
    
    refs #18518
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/tools/arvbox/bin/arvbox b/tools/arvbox/bin/arvbox
index 7aefe73d1..70e5b6fa9 100755
--- a/tools/arvbox/bin/arvbox
+++ b/tools/arvbox/bin/arvbox
@@ -61,7 +61,7 @@ if test -z "$WORKBENCH2_BRANCH" ; then
 fi
 
 # Update this to the docker tag for the version on releases.
-DEFAULT_TAG=2.3.1
+DEFAULT_TAG=2.3.2
 
 PG_DATA="$ARVBOX_DATA/postgres"
 VAR_DATA="$ARVBOX_DATA/var"
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 194a62a8a..c678a206d 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -175,7 +175,7 @@ CUSTOM_CERTS_DIR="./certs"
 # The "local.params.example.*" files already set "RELEASE=production"
 # to deploy  production-ready packages
 RELEASE="production"
-VERSION="2.3.1-1"
+VERSION="2.3.2-1"
 
 # These are arvados-formula-related parameters
 # An arvados-formula tag. For a stable release, this should be a

commit 4dce2661e80da9b65e5a548c863d61239233d6b0
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Wed Dec 8 16:22:12 2021 -0500

    Add (empty) upgrading section for 2.3.2
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index fbf25fa7a..c409b6f32 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -35,6 +35,12 @@ TODO: extract this information based on git commit messages and generate changel
 <div class="releasenotes">
 </notextile>
 
+h2(#v2_3_2). v2.3.2 (2021-12-09)
+
+"previous: Upgrading to 2.3.1":#v2_3_1
+
+There are no changes requiring special attention in this version.
+
 h2(#v2_3_1). v2.3.1 (2021-11-19)
 
 "previous: Upgrading to 2.3.0":#v2_3_0

commit ae313a75753fb34781db0bfe776e855cab924dc3
Author: Ward Vandewege <ward at curii.com>
Date:   Mon Dec 6 19:46:01 2021 -0500

    17667: a round of edits after review feedback.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/admin/config-urls.html.textile.liquid b/doc/admin/config-urls.html.textile.liquid
index 21de54973..1358fd81e 100644
--- a/doc/admin/config-urls.html.textile.liquid
+++ b/doc/admin/config-urls.html.textile.liquid
@@ -14,27 +14,61 @@ The Arvados configuration is stored at @/etc/arvados/config.yml@. See the "Confi
 
 The @Services@ section lists a number of Arvados services, each with an @InternalURLs@ and/or @ExternalURL@ configuration key. This document explains the precise meaning of these configuration keys, and how they are used by the Arvados services.
 
-Generally speaking, the keys under @InternalURLs@ are the endpoints where the service should be listening, and reachable from other hosts inside the Arvados cluster. The @ExternalURL@ value is the URL that the service advertises as its own URL. The @ExternalURL@ is the address where the service should be reachable from outside the Arvados cluster.
+The @ExternalURL@ is the address where the service should be reachable by clients, both from inside and from outside the Arvados cluster. Some services do not expose an Arvados API, only Prometheus metrics. In that case, @ExternalURL@ is not used.
 
-Because many of the Arvados services live behind a reverse proxy (e.g. Nginx as used in this guide), configuring the reverse proxy and the @InternalURLs@ and @ExternalURL@ values must be done in concert.
+The keys under @InternalURLs@ are addresses that are used by the reverse proxy (e.g. Nginx) that fronts Arvados services. The exception is the @Keepstore@ service, where clients connect directly to the addresses listed under @InternalURLs@. If a service is not fronted by a reverse proxy, e.g. when its endpoint only exposes Prometheus metrics, the intention is that metrics are collected directly from the endpoints defined in @InternalURLs@.
 
-We'll walk through a number of examples to explain in more detail.
+@InternalURLs@ are also used by the service itself to figure out which address/port to listen on.
 
-h2. Keep-balance
+If the Arvados service lives behind a reverse proxy (e.g. Nginx), configuring the reverse proxy and the @InternalURLs@ and @ExternalURL@ values must be done in concert.
+
+h2. Overview
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_.Service     |_.ExternalURL required? |_.InternalURLs required?|_.InternalURLs must be reachable from other cluster nodes?|_.Note|
+|railsapi       |no                     |yes|no ^1^|InternalURLs only used by Controller|
+|controller     |yes                    |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|
+|arvados-dispatch-cloud|no              |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|
+|arvados-dispatch-lsf|no                |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|
+|git-http       |yes                    |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|
+|git-ssh        |yes                    |no |no    ||
+|keepproxy      |yes                    |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|
+|keepstore      |no                     |yes|yes   |All clients connect to InternalURLs|
+|keep-balance   |no                     |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|
+|keep-web       |yes                    |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|
+|websocket      |yes                    |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|
+|workbench1     |yes                    |no|no     ||
+|workbench2     |yes                    |no|no     ||
+</div>
+
+^1^ If @Controller@ runs on a different host than @RailsAPI@, the @InternalURLs@ will need to be reachable from the host that runs @Controller@.
+^2^ If the reverse proxy (e.g. Nginx) does not run on the same host as the Arvados service it fronts, the @InternalURLs@ will need to be reachable from the host that runs the reverse proxy.
+^3^ If the Prometheus metrics are not collected from the same machine that runs the service, the @InternalURLs@ will need to be reachable from the host that collects the metrics.
+
+When @InternalURLs@ do not need to be reachable from other nodes, it is most secure to use loopback addresses as @InternalURLs@, e.g. @http://127.0.0.1:9005@.
+
+It is recommended to use a split-horizon DNS setup where the hostnames specified in @ExternalURL@ resolve to an internal IP address from inside the Arvados cluster, and a publicly routed external IP address when resolved from outside the cluster. This simplifies firewalling and provides optimally efficient traffic routing. In a cloud environment where traffic that flows via public IP addresses is charged, using split horizon DNS can also avoid unnecessary expense.
+
+h2. Examples
+
+The remainder of this document walks through a number of examples to provide more detail.
+
+h3. Keep-balance
 
 Consider this section for the @Keep-balance@ service:
 
 {% codeblock as yaml %}
       Keepbalance:
         InternalURLs:
-          "http://ClusterID.example.com:9005/": {}
+          "http://ip-10-0-1-233.internal:9005/": {}
 {% endcodeblock %}
 
 @Keep-balance@ has an API endpoint, but it is only used to expose "Prometheus":https://prometheus.io metrics.
 
 There is no @ExternalURL@ key because @Keep-balance@ does not have an Arvados API; no Arvados services need to connect to @Keep-balance@.
 
-The value for @InternalURLs@ tells the @Keep-balance@ service to start up and listen on port 9005, if it is started on a host where @ClusterID.example.com@ resolves to a local IP address. If @Keep-balance@ is started on a machine where the @ClusterID.example.com@ hostname does not resolve to a local IP address, it would refuse to start up, because it would not be able to find a local IP address to listen on.
+The value for @InternalURLs@ tells the @Keep-balance@ service to start up and listen on port 9005, if it is started on a host where @ip-10-0-1-233.internal@ resolves to a local IP address. If @Keep-balance@ is started on a machine where the @ip-10-0-1-233.internal@ hostname does not resolve to a local IP address, it would refuse to start up, because it would not be able to find a local IP address to listen on.
 
 It is also possible to use IP addresses in @InternalURLs@, for example:
 
@@ -56,7 +90,7 @@ Finally, it is also possible to listen on all interfaces, for example:
 
 In this case, @Keep-balance@ will listen on port 9005 on all IP addresses local to the machine.
 
-h2. Keepstore
+h3. Keepstore
 
 Consider this section for the @Keepstore@ service:
 
@@ -71,7 +105,7 @@ There is no @ExternalURL@ key because @Keepstore@ is only accessed from inside t
 
 When @Keepstore@ is installed on the host where @keep0.ClusterID.example.com@ resolves to a local IP address, it will listen on port 25107 on that IP address. Likewise on the @keep1.ClusterID.example.com@ host. On all other systems, @Keepstore@ will refuse to start.
 
-h2. Keepproxy
+h3. Keepproxy
 
 Consider this section for the @Keepproxy@ service:
 
@@ -113,7 +147,7 @@ server {
 
 If a client connects to the @Keepproxy@ service, it will talk to Nginx which will reverse proxy the traffic to the @Keepproxy@ service.
 
-h2. Workbench
+h3. Workbench
 
 Consider this section for the @Workbench@ service:
 
@@ -122,7 +156,7 @@ Consider this section for the @Workbench@ service:
     ExternalURL: "https://workbench.ClusterID.example.com"
 {% endcodeblock %}
 
-The @ExternalURL@ advertised is @https://workbench.ClusterID.example.com@. There is no value for @InternalURLs@ because Workbench1 is a Rails application served by Passenger, and the listening host/port is configured in the Nginx configuration:
+The @ExternalURL@ advertised is @https://workbench.ClusterID.example.com@. There is no value for @InternalURLs@ because Workbench1 is a Rails application served by Passenger. The only client connecting to the Passenger process is the reverse proxy (e.g. Nginx), and the listening host/port is configured in its configuration:
 
 <notextile><pre><code>
 server {
@@ -145,9 +179,9 @@ server {
 }
 </code></pre></notextile>
 
-h2. API server
+h3. API server
 
-Consider this section for the @API server@ service:
+Consider this section for the @RailsAPI@ service:
 
 {% codeblock as yaml %}
       RailsAPI:
@@ -155,9 +189,9 @@ Consider this section for the @API server@ service:
           "http://localhost:8004": {}
 {% endcodeblock %}
 
-There is no @ExternalURL@ defined because the @API server@ is not directly accessible and does not need to advertise a URL: all traffic to it flows via @Controller@, which is the only client that talks to it.
+There is no @ExternalURL@ defined because the @RailsAPI@ is not directly accessible and does not need to advertise a URL: all traffic to it flows via @Controller@, which is the only client that talks to it.
 
-The @API server@ is also a Rails application, and its listening host/port is defined in the Nginx configuration:
+The @RailsAPI@ service is also a Rails application, and its listening host/port is defined in the Nginx configuration:
 
 <notextile><pre><code>
 server {
@@ -185,9 +219,9 @@ server {
 }
 </code></pre></notextile>
 
-So then, why is there a need to specify @InternalURLs@ for the @RailsAPI@ service? It is there because this is how the @Controller@ service locates the @RailsAPI@ service it should talk to. Since this connection is internal to the Avados cluster, @Controller@ uses @InternalURLs@ to find the @RailsAPI@ endpoint.
+So then, why is there a need to specify @InternalURLs@ for the @RailsAPI@ service? It is there because this is how the @Controller@ service locates the @RailsAPI@ service it should talk to. Since this connection is internal to the Arvados cluster, @Controller@ uses @InternalURLs@ to find the @RailsAPI@ endpoint.
 
-h2. Controller
+h3. Controller
 
 Consider this section for the @Controller@ service:
 
@@ -198,7 +232,7 @@ Consider this section for the @Controller@ service:
     ExternalURL: "https://ClusterID.example.com"
 {% endcodeblock %}
 
-The @ExternalURL@ advertised is @https://ClusterID.example.com@. The @Controller@ service will start up on @localhost@ port 8003. Nginx is configured to terminate SSL and sit in front of the @Controller@ service:
+The @ExternalURL@ advertised is @https://ClusterID.example.com@. The @Controller@ service will start up on @localhost@ port 8003. Nginx is configured to sit in front of the @Controller@ service and terminates SSL:
 
 <notextile><pre><code>
 # This is the port where nginx expects to contact arvados-controller.
@@ -237,35 +271,4 @@ server {
 }
 </code></pre></notextile>
 
-@Controller@ provides the main Arvados API endpoint. As such, it is used extensively by Arvados clients inside and outside of the cluster. It is recommended to use a split-horizon DNS setup where @ClusterID.example.com@ resolves to an internal IP address from inside the Arvados cluster, and a publicly routed external IP address when resolved from outside the cluster. This will simplify firewalling and traffic routing. In a cloud environment where traffic that flows via public IP addresses is charged, using split horizon DNS can also save a lot of money.
-
-h2. Connection overview
-
-When a client wants to talk to an Arvados cluster, it needs to look up the endpoint of the particular API it wants to connect to. If the client does not have access to the @config.yml@ file, it connects to @Controller@ and retrieves the value of @InternalURLs@ or @ExternalURL@ for the service it wants to talk to. Arvados clients typically get the URL of the @Controller@ from the @ARVADOS_API_HOST@ environment variable.
-
-When an Arvados service with access to @config.yml@ needs to talk to another Arvados service, it looks up the value of @InternalURLs@ or @ExternalURL@ for the service it wants to talk to.
 
-Below is a list of Arvados clients (or services acting as a client), the Arvados service they connect to, and the configuration value that they use to find the appropriate endpoint.
-
-<div class="offset1">
-table(table table-bordered table-condensed).
-|_.Client     |_.Destination service|_.Target URL from |
-|arv        |Controller   |$ARVADOS_API_HOST       |
-|API client (e.g. SDK, arv-put, arv-get, etc) |Controller   |$ARVADOS_API_HOST       |
-|arv-ws     |Controller   |$ARVADOS_API_HOST       |
-|arv-ws     |Websocket    |Websocket.ExternalURL   |
-|arv-mount  |Controller   |$ARVADOS_API_HOST       |
-|arv-mount  |Keepstore    |Keepstore.InternalURLs  |
-|arv-mount  |Keepproxy    |Keepproxy.ExternalURL   |
-|Controller |RailsAPI     |RailsAPI.InternalURLs   |
-|Keep-balance|Keepstore   |Keepstore.InternalURLs  |
-|Keep client|Keepstore    |Keepstore.InternalURLs  |
-|Keep client|Keepproxy    |Keepproxy.ExternalURL   |
-|Nginx      |Controller   |Controller.InternalURLs |
-|Nginx      |Keepproxy    |Keepproxy.InternalURLs  |
-|Nginx      |Keep-web     |WebDAV.InternalURLs     |
-|Prometheus |Keep-balance |Keepbalance.InternalURLs|
-|Workbench2 |Keep-web     |WebDAV.ExternalURL      |
-|Workbench2 |Websocket    |Websocket.ExternalURL   |
-|webdav client|Keep-web   |WebDAV.ExternalURL      |
-</div>
diff --git a/doc/admin/metrics.html.textile.liquid b/doc/admin/metrics.html.textile.liquid
index 0cfa0a2e6..b140bcc1b 100644
--- a/doc/admin/metrics.html.textile.liquid
+++ b/doc/admin/metrics.html.textile.liquid
@@ -34,6 +34,7 @@ table(table table-bordered table-condensed table-hover).
 |arvados-api-server||
 |arvados-controller|✓|
 |arvados-dispatch-cloud|✓|
+|arvados-dispatch-lsf|✓|
 |arvados-git-httpd||
 |arvados-ws|✓|
 |composer||
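
The @config-urls@ documentation above explains that a service configured with an @InternalURLs@ entry starts up only when the URL's hostname resolves to an IP address on the local machine (or when the address is 0.0.0.0, meaning all interfaces). That check boils down to comparing the resolved addresses against the machine's interface addresses. The Go sketch below illustrates the idea only; it is not the Arvados implementation, and the URL used in main() is just an example value.

package main

import (
	"fmt"
	"net"
	"net/url"
	"os"
)

// resolvesLocally reports whether the host part of rawurl resolves to an
// IP address assigned to one of this machine's network interfaces.
func resolvesLocally(rawurl string) (bool, error) {
	u, err := url.Parse(rawurl)
	if err != nil {
		return false, err
	}
	ips, err := net.LookupIP(u.Hostname())
	if err != nil {
		return false, err
	}
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return false, err
	}
	for _, ip := range ips {
		if ip.IsUnspecified() {
			// 0.0.0.0 or :: means "listen on all interfaces"
			return true, nil
		}
		for _, a := range addrs {
			if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.Equal(ip) {
				return true, nil
			}
		}
	}
	return false, nil
}

func main() {
	// Example InternalURL; per the documentation above, a service would
	// refuse to start if this returned false.
	ok, err := resolvesLocally("http://127.0.0.1:9005/")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("can listen locally:", ok)
}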

commit b0d0360b250bed8e6e5a30f7ee057da0600bc99c
Author: Ward Vandewege <ward at curii.com>
Date:   Mon Dec 6 12:48:57 2021 -0500

    17667: add some missing information on the "Install prerequisites" page.
           Update the "Install PostgreSQL" page with a reference to Aurora
           RDS. Add a new page that details InternalURLs/ExternalURL.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/_config.yml b/doc/_config.yml
index dde87323d..83be731e8 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -226,6 +226,7 @@ navbar:
       - install/config.html.textile.liquid
       - admin/config-migration.html.textile.liquid
       - admin/config.html.textile.liquid
+      - admin/config-urls.html.textile.liquid
     - Core:
       - install/install-api-server.html.textile.liquid
     - Keep:
diff --git a/doc/admin/config-urls.html.textile.liquid b/doc/admin/config-urls.html.textile.liquid
new file mode 100644
index 000000000..21de54973
--- /dev/null
+++ b/doc/admin/config-urls.html.textile.liquid
@@ -0,0 +1,271 @@
+---
+layout: default
+navsection: installguide
+title: InternalURLs and ExternalURL
+...
+
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+The Arvados configuration is stored at @/etc/arvados/config.yml@. See the "Configuration reference":config.html for more detail.
+
+The @Services@ section lists a number of Arvados services, each with an @InternalURLs@ and/or @ExternalURL@ configuration key. This document explains the precise meaning of these configuration keys, and how they are used by the Arvados services.
+
+Generally speaking, the keys under @InternalURLs@ are the endpoints where the service should be listening, and reachable from other hosts inside the Arvados cluster. The @ExternalURL@ value is the URL that the service advertises as its own URL. The @ExternalURL@ is the address where the service should be reachable from outside the Arvados cluster.
+
+Because many of the Arvados services live behind a reverse proxy (e.g. Nginx as used in this guide), configuring the reverse proxy and the @InternalURLs@ and @ExternalURL@ values must be done in concert.
+
+We'll walk through a number of examples to explain in more detail.
+
+h2. Keep-balance
+
+Consider this section for the @Keep-balance@ service:
+
+{% codeblock as yaml %}
+      Keepbalance:
+        InternalURLs:
+          "http://ClusterID.example.com:9005/": {}
+{% endcodeblock %}
+
+@Keep-balance@ has an API endpoint, but it is only used to expose "Prometheus":https://prometheus.io metrics.
+
+There is no @ExternalURL@ key because @Keep-balance@ does not have an Arvados API; no Arvados services need to connect to @Keep-balance@.
+
+The value for @InternalURLs@ tells the @Keep-balance@ service to start up and listen on port 9005, if it is started on a host where @ClusterID.example.com@ resolves to a local IP address. If @Keep-balance@ is started on a machine where the @ClusterID.example.com@ hostname does not resolve to a local IP address, it would refuse to start up, because it would not be able to find a local IP address to listen on.
+
+It is also possible to use IP addresses in @InternalURLs@, for example:
+
+{% codeblock as yaml %}
+      Keepbalance:
+        InternalURLs:
+          "http://127.0.0.1:9005/": {}
+{% endcodeblock %}
+
+In this example, @Keep-balance@ would start up and listen on port 9005 at the @127.0.0.1@ IP address. Prometheus would only be able to access the @Keep-balance@ metrics if it could reach that IP and port, e.g. if it runs on the same machine.
+
+Finally, it is also possible to listen on all interfaces, for example:
+
+{% codeblock as yaml %}
+      Keepbalance:
+        InternalURLs:
+          "http://0.0.0.0:9005/": {}
+{% endcodeblock %}
+
+In this case, @Keep-balance@ will listen on port 9005 on all IP addresses local to the machine.
+
+h2. Keepstore
+
+Consider this section for the @Keepstore@ service:
+
+{% codeblock as yaml %}
+      Keepstore:
+        InternalURLs:
+          "http://keep0.ClusterID.example.com:25107": {}
+          "http://keep1.ClusterID.example.com:25107": {}
+{% endcodeblock %}
+
+There is no @ExternalURL@ key because @Keepstore@ is only accessed from inside the Arvados cluster. For access from outside, all traffic goes via @Keepproxy@.
+
+When @Keepstore@ is installed on the host where @keep0.ClusterID.example.com@ resolves to a local IP address, it will listen on port 25107 on that IP address. Likewise on the @keep1.ClusterID.example.com@ host. On all other systems, @Keepstore@ will refuse to start.
+
+h2. Keepproxy
+
+Consider this section for the @Keepproxy@ service:
+
+{% codeblock as yaml %}
+      Keepproxy:
+        ExternalURL: https://keep.ClusterID.example.com
+        InternalURLs:
+          "http://localhost:25107": {}
+{% endcodeblock %}
+
+The @ExternalURL@ advertised is @https://keep.ClusterID.example.com@. The @Keepproxy@ service will start up on @localhost@ port 25107, however. This is possible because we also configure Nginx to terminate SSL and sit in front of the @Keepproxy@ service:
+
+<notextile><pre><code>upstream keepproxy {
+  server                127.0.0.1:<span class="userinput">25107</span>;
+}
+
+server {
+  listen                  443 ssl;
+  server_name             <span class="userinput">keep.ClusterID.example.com</span>;
+
+  proxy_connect_timeout   90s;
+  proxy_read_timeout      300s;
+  proxy_set_header        X-Real-IP $remote_addr;
+  proxy_http_version      1.1;
+  proxy_request_buffering off;
+  proxy_max_temp_file_size 0;
+
+  ssl_certificate     <span class="userinput">/YOUR/PATH/TO/cert.pem</span>;
+  ssl_certificate_key <span class="userinput">/YOUR/PATH/TO/cert.key</span>;
+
+  # Clients need to be able to upload blocks of data up to 64MiB in size.
+  client_max_body_size    64m;
+
+  location / {
+    proxy_pass            http://keepproxy;
+  }
+}
+</code></pre></notextile>
+
+If a client connects to the @Keepproxy@ service, it will talk to Nginx which will reverse proxy the traffic to the @Keepproxy@ service.
+
+h2. Workbench
+
+Consider this section for the @Workbench@ service:
+
+{% codeblock as yaml %}
+  Workbench1:
+    ExternalURL: "https://workbench.ClusterID.example.com"
+{% endcodeblock %}
+
+The @ExternalURL@ advertised is @https://workbench.ClusterID.example.com@. There is no value for @InternalURLs@ because Workbench1 is a Rails application served by Passenger, and the listening host/port is configured in the Nginx configuration:
+
+<notextile><pre><code>
+server {
+  listen       443 ssl;
+  server_name  workbench.ClusterID.example.com;
+
+  ssl_certificate     /YOUR/PATH/TO/cert.pem;
+  ssl_certificate_key /YOUR/PATH/TO/cert.key;
+
+  root /var/www/arvados-workbench/current/public;
+  index  index.html;
+
+  passenger_enabled on;
+  # If you're using RVM, uncomment the line below.
+  #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+
+  # `client_max_body_size` should match the corresponding setting in
+  # the API.MaxRequestSize and Controller's server's Nginx configuration.
+  client_max_body_size 128m;
+}
+</code></pre></notextile>
+
+h2. API server
+
+Consider this section for the @API server@ service:
+
+{% codeblock as yaml %}
+      RailsAPI:
+        InternalURLs:
+          "http://localhost:8004": {}
+{% endcodeblock %}
+
+There is no @ExternalURL@ defined because the @API server@ is not directly accessible and does not need to advertise a URL: all traffic to it flows via @Controller@, which is the only client that talks to it.
+
+The @API server@ is also a Rails application, and its listening host/port is defined in the Nginx configuration:
+
+<notextile><pre><code>
+server {
+  # This configures the Arvados API server.  It is written using Ruby
+  # on Rails and uses the Passenger application server.
+
+  listen localhost:8004;
+  server_name localhost-api;
+
+  root /var/www/arvados-api/current/public;
+  index  index.html index.htm index.php;
+
+  passenger_enabled on;
+
+  # If you are using RVM, uncomment the line below.
+  # If you're using system ruby, leave it commented out.
+  #passenger_ruby /usr/local/rvm/wrappers/default/ruby;
+
+  # This value effectively limits the size of API objects users can
+  # create, especially collections.  If you change this, you should
+  # also ensure the following settings match it:
+  # * `client_max_body_size` in the previous server section
+  # * `API.MaxRequestSize` in config.yml
+  client_max_body_size 128m;
+}
+</code></pre></notextile>
+
+So then, why is there a need to specify @InternalURLs@ for the @RailsAPI@ service? It is there because this is how the @Controller@ service locates the @RailsAPI@ service it should talk to. Since this connection is internal to the Avados cluster, @Controller@ uses @InternalURLs@ to find the @RailsAPI@ endpoint.
+
+h2. Controller
+
+Consider this section for the @Controller@ service:
+
+{% codeblock as yaml %}
+  Controller:
+    InternalURLs:
+      "http://localhost:8003": {}
+    ExternalURL: "https://ClusterID.example.com"
+{% endcodeblock %}
+
+The @ExternalURL@ advertised is @https://ClusterID.example.com@. The @Controller@ service will start up on @localhost@ port 8003. Nginx is configured to terminate SSL and sit in front of the @Controller@ service:
+
+<notextile><pre><code>
+# This is the port where nginx expects to contact arvados-controller.
+upstream controller {
+  server     localhost:8003  fail_timeout=10s;
+}
+
+server {
+  # This configures the public https port that clients will actually connect to,
+  # the request is reverse proxied to the upstream 'controller'
+
+  listen       443 ssl;
+  server_name  ClusterID.example.com;
+
+  ssl_certificate     /YOUR/PATH/TO/cert.pem;
+  ssl_certificate_key /YOUR/PATH/TO/cert.key;
+
+  # Refer to the comment about this setting in the passenger (arvados
+  # api server) section of your Nginx configuration.
+  client_max_body_size 128m;
+
+  location / {
+    proxy_pass            http://controller;
+    proxy_redirect        off;
+    proxy_connect_timeout 90s;
+    proxy_read_timeout    300s;
+
+    proxy_set_header      Host              $http_host;
+    proxy_set_header      Upgrade           $http_upgrade;
+    proxy_set_header      Connection        "upgrade";
+    proxy_set_header      X-External-Client $external_client;
+    proxy_set_header      X-Forwarded-For   $proxy_add_x_forwarded_for;
+    proxy_set_header      X-Forwarded-Proto https;
+    proxy_set_header      X-Real-IP         $remote_addr;
+  }
+}
+</code></pre></notextile>
+
+@Controller@ provides the main Arvados API endpoint. As such, it is used extensively by Arvados clients inside and outside of the cluster. It is recommended to use a split-horizon DNS setup where @ClusterID.example.com@ resolves to an internal IP address from inside the Arvados cluster, and a publicly routed external IP address when resolved from outside the cluster. This will simplify firewalling and traffic routing. In a cloud environment where traffic that flows via public IP addresses is charged, using split horizon DNS can also save a lot of money.
+
+h2. Connection overview
+
+When a client wants to talk to an Arvados cluster, it needs to look up the endpoint of the particular API it wants to connect to. If the client does not have access to the @config.yml@ file, it connects to @Controller@ and retrieves the value of @InternalURLs@ or @ExternalURL@ for the service it wants to talk to. Arvados clients typically get the URL of the @Controller@ from the @ARVADOS_API_HOST@ environment variable.
+
+When an Arvados service with access to @config.yml@ needs to talk to another Arvados service, it looks up the value of @InternalURLs@ or @ExternalURL@ for the service it wants to talk to.
+
+Below is a list of Arvados clients (or services acting as a client), the Arvados service they connect to, and the configuration value that they use to find the appropriate endpoint.
+
+<div class="offset1">
+table(table table-bordered table-condensed).
+|_.Client     |_.Destination service|_.Target URL from |
+|arv        |Controller   |$ARVADOS_API_HOST       |
+|API client (e.g. SDK, arv-put, arv-get, etc) |Controller   |$ARVADOS_API_HOST       |
+|arv-ws     |Controller   |$ARVADOS_API_HOST       |
+|arv-ws     |Websocket    |Websocket.ExternalURL   |
+|arv-mount  |Controller   |$ARVADOS_API_HOST       |
+|arv-mount  |Keepstore    |Keepstore.InternalURLs  |
+|arv-mount  |Keepproxy    |Keepproxy.ExternalURL   |
+|Controller |RailsAPI     |RailsAPI.InternalURLs   |
+|Keep-balance|Keepstore   |Keepstore.InternalURLs  |
+|Keep client|Keepstore    |Keepstore.InternalURLs  |
+|Keep client|Keepproxy    |Keepproxy.ExternalURL   |
+|Nginx      |Controller   |Controller.InternalURLs |
+|Nginx      |Keepproxy    |Keepproxy.InternalURLs  |
+|Nginx      |Keep-web     |WebDAV.InternalURLs     |
+|Prometheus |Keep-balance |Keepbalance.InternalURLs|
+|Workbench2 |Keep-web     |WebDAV.ExternalURL      |
+|Workbench2 |Websocket    |Websocket.ExternalURL   |
+|webdav client|Keep-web   |WebDAV.ExternalURL      |
+</div>
diff --git a/doc/install/install-manual-prerequisites.html.textile.liquid b/doc/install/install-manual-prerequisites.html.textile.liquid
index 084f32e02..360cfbabd 100644
--- a/doc/install/install-manual-prerequisites.html.textile.liquid
+++ b/doc/install/install-manual-prerequisites.html.textile.liquid
@@ -50,7 +50,7 @@ Arvados consists of many components, some of which may be omitted (at the cost o
 table(table table-bordered table-condensed).
 |\3=. *Core*|
 |"PostgreSQL database":install-postgresql.html |Stores data for the API server.|Required.|
-|"API server":install-api-server.html |Core Arvados logic for managing users, groups, collections, containers, and enforcing permissions.|Required.|
+|"API server + Controller":install-api-server.html |Core Arvados logic for managing users, groups, collections, containers, and enforcing permissions.|Required.|
 |\3=. *Keep (storage)*|
 |"Keepstore":install-keepstore.html |Stores content-addressed blocks in a variety of backends (local filesystem, cloud object storage).|Required.|
 |"Keepproxy":install-keepproxy.html |Gateway service to access keep servers from external networks.|Required to be able to use arv-put, arv-get, or arv-mount outside the private Arvados network.|
@@ -65,7 +65,8 @@ table(table table-bordered table-condensed).
 |"Git server":install-arv-git-httpd.html |Arvados-hosted git repositories, with Arvados-token based authentication.|Optional, but required by Workflow Composer.|
 |\3=. *Crunch (running containers)*|
 |"arvados-dispatch-cloud":crunch2-cloud/install-dispatch-cloud.html |Allocate and free cloud VM instances on demand based on workload.|Optional, not needed for a static Slurm cluster such as on-premises HPC.|
-|"crunch-dispatch-slurm":crunch2-slurm/install-dispatch.html |Run analysis workflows using Docker containers distributed across a Slurm cluster.|Optional, not needed for a Cloud installation, or if you wish to use Arvados for data management only.|
+|"crunch-dispatch-slurm":crunch2-slurm/install-dispatch.html |Run analysis workflows using Docker or Singularity containers distributed across a Slurm cluster.|Optional, not needed for a Cloud installation, or if you wish to use Arvados for data management only.|
+|"crunch-dispatch-lsf":crunch2-lsf/install-dispatch.html |Run analysis workflows using Docker or Singularity containers distributed across an LSF cluster.|Optional, not needed for a Cloud installation, or if you wish to use Arvados for data management only.|
 
 h2(#identity). Identity provider
 
@@ -97,7 +98,8 @@ h2(#scheduler). Container compute scheduler
 Choose which backend you will use to schedule computation.
 
 * On AWS EC2 and Azure, you probably want to use @arvados-dispatch-cloud@ to manage the full lifecycle of cloud compute nodes: starting up nodes sized to the container request, executing containers on those nodes, and shutting nodes down when no longer needed.
-* For on-premise HPC clusters using "slurm":https://slurm.schedmd.com/ use @crunch-dispatch-slurm@ to execute containers with slurm job submissions.
+* For on-premises HPC clusters using "slurm":https://slurm.schedmd.com/ use @crunch-dispatch-slurm@ to execute containers with slurm job submissions.
+* For on-premises HPC clusters using "LSF":https://www.ibm.com/products/hpc-workload-management/ use @crunch-dispatch-lsf@ to execute containers with slurm job submissions.
 * For single node demos, use @crunch-dispatch-local@ to execute containers directly.
 
 h2(#machines). Hardware (or virtual machines)
@@ -117,7 +119,7 @@ table(table table-bordered table-condensed).
 </div>
 
 ^1^ Should be scaled up as needed
-^2^ Refers to shell nodes managed by Arvados, that provide ssh access for users to interact with Arvados at the command line.  Optional.
+^2^ Refers to shell nodes managed by Arvados that provide ssh access for users to interact with Arvados at the command line.  Optional.
 
 {% include 'notebox_begin' %}
 For a small demo installation, it is possible to run all the Arvados services on a single node.  Special considerations for single-node installs will be noted in boxes like this.
@@ -145,6 +147,7 @@ h2(#dnstls). DNS entries and TLS certificates
 The following services are normally public-facing and require DNS entries and corresponding TLS certificates.  Get certificates from your preferred TLS certificate provider.  We recommend using "Let's Encrypt":https://letsencrypt.org/.  You can run several services on the same node, but each distinct DNS name requires a valid, matching TLS certificate.
 
 This guide uses the following DNS name conventions.  A later part of this guide will describe how to set up Nginx virtual hosts.
+It is possible to use custom DNS names for the Arvados services.
 
 <div class="offset1">
 table(table table-bordered table-condensed).
diff --git a/doc/install/install-postgresql.html.textile.liquid b/doc/install/install-postgresql.html.textile.liquid
index 60afa1e24..1413890cd 100644
--- a/doc/install/install-postgresql.html.textile.liquid
+++ b/doc/install/install-postgresql.html.textile.liquid
@@ -11,9 +11,14 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvados requires at least version *9.4* of PostgreSQL.
 
+* "AWS":#aws
 * "CentOS 7":#centos7
 * "Debian or Ubuntu":#debian
 
+h3(#aws). AWS
+
+When deploying on AWS, Arvados can use an Aurora RDS PostgreSQL database. Aurora Serverless is not recommended.
+
 h3(#centos7). CentOS 7
 {% assign rh_version = "7" %}
 {% include 'note_python_sc' %}
@@ -35,4 +40,4 @@ Debian 10 (Buster) and Ubuntu 16.04 (Xenial) and later versions include a suffic
 # Install PostgreSQL
   <notextile><pre># <span class="userinput">apt-get --no-install-recommends install postgresql postgresql-contrib</span></pre></notextile>
 # Configure the database to launch at boot and start now
-  <notextile><pre># <span class="userinput">systemctl enable --now postgresql</span></pre></notextile>
+<notextile><pre># <span class="userinput">systemctl enable --now postgresql</span></pre></notextile>

commit 56c37ef9b76b992dad59524cae6c34b86bb911d0
Author: Tom Clegg <tom at curii.com>
Date:   Mon Dec 6 19:32:34 2021 -0500

    18547: Error out if two volumes return the same non-empty DeviceID.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keep-balance/balance.go b/services/keep-balance/balance.go
index fa01d512b..eb6f580f4 100644
--- a/services/keep-balance/balance.go
+++ b/services/keep-balance/balance.go
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -266,6 +267,29 @@ func (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {
 		}
 	}
 
+	mountProblem := false
+	type deviceMount struct {
+		srv *KeepService
+		mnt *KeepMount
+	}
+	deviceMounted := map[string]deviceMount{} // DeviceID -> mount
+	for _, srv := range bal.KeepServices {
+		for _, mnt := range srv.mounts {
+			if first, dup := deviceMounted[mnt.DeviceID]; dup && first.mnt.UUID != mnt.UUID && mnt.DeviceID != "" {
+				bal.logf("config error: device %s is mounted with multiple volume UUIDs: %s on %s, and %s on %s",
+					mnt.DeviceID,
+					first.mnt.UUID, first.srv,
+					mnt.UUID, srv)
+				mountProblem = true
+				continue
+			}
+			deviceMounted[mnt.DeviceID] = deviceMount{srv, mnt}
+		}
+	}
+	if mountProblem {
+		return errors.New("cannot continue with config errors (see above)")
+	}
+
 	var checkPage arvados.CollectionList
 	if err = c.RequestAndDecode(&checkPage, "GET", "arvados/v1/collections", nil, arvados.ResourceListParams{
 		Limit:              new(int),
diff --git a/services/keep-balance/balance_run_test.go b/services/keep-balance/balance_run_test.go
index 4e2c6803c..0d1b6b591 100644
--- a/services/keep-balance/balance_run_test.go
+++ b/services/keep-balance/balance_run_test.go
@@ -397,6 +397,32 @@ func (s *runSuite) TestRefuseNonAdmin(c *check.C) {
 	c.Check(pullReqs.Count(), check.Equals, 0)
 }
 
+func (s *runSuite) TestRefuseSameDeviceDifferentVolumes(c *check.C) {
+	opts := RunOptions{
+		CommitPulls: true,
+		CommitTrash: true,
+		Logger:      ctxlog.TestLogger(c),
+	}
+	s.stub.serveCurrentUserAdmin()
+	s.stub.serveZeroCollections()
+	s.stub.serveKeepServices(stubServices)
+	s.stub.mux.HandleFunc("/mounts", func(w http.ResponseWriter, r *http.Request) {
+		hostid := r.Host[:5] // "keep0.zzzzz.arvadosapi.com:25107" => "keep0"
+		json.NewEncoder(w).Encode([]arvados.KeepMount{{
+			UUID:           "zzzzz-ivpuk-0000000000" + hostid,
+			DeviceID:       "keep0-vol0",
+			StorageClasses: map[string]bool{"default": true},
+		}})
+	})
+	trashReqs := s.stub.serveKeepstoreTrash()
+	pullReqs := s.stub.serveKeepstorePull()
+	srv := s.newServer(&opts)
+	_, err := srv.runOnce()
+	c.Check(err, check.ErrorMatches, "cannot continue with config errors.*")
+	c.Check(trashReqs.Count(), check.Equals, 0)
+	c.Check(pullReqs.Count(), check.Equals, 0)
+}
+
 func (s *runSuite) TestWriteLostBlocks(c *check.C) {
 	lostf, err := ioutil.TempFile("", "keep-balance-lost-blocks-test-")
 	c.Assert(err, check.IsNil)
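
The sanity check added in this commit boils down to indexing mounts by DeviceID and flagging any device that shows up under more than one volume UUID. The following is a distilled, self-contained Go sketch of that check with simplified types; the mount values in main() are made up for illustration and the real logic lives in services/keep-balance/balance.go.

package main

import (
	"errors"
	"fmt"
)

// mount is a simplified stand-in for the keep-balance KeepMount type.
type mount struct {
	UUID     string
	DeviceID string
	Service  string
}

// checkDuplicateDevices returns an error when the same non-empty DeviceID is
// reported under more than one volume UUID.
func checkDuplicateDevices(mounts []mount) error {
	seen := map[string]mount{} // DeviceID -> first mount seen
	problem := false
	for _, mnt := range mounts {
		if mnt.DeviceID == "" {
			continue
		}
		if first, dup := seen[mnt.DeviceID]; dup && first.UUID != mnt.UUID {
			fmt.Printf("config error: device %s is mounted with multiple volume UUIDs: %s on %s, and %s on %s\n",
				mnt.DeviceID, first.UUID, first.Service, mnt.UUID, mnt.Service)
			problem = true
			continue
		}
		seen[mnt.DeviceID] = mnt
	}
	if problem {
		return errors.New("cannot continue with config errors (see above)")
	}
	return nil
}

func main() {
	// Two keepstores reporting the same device under different volume UUIDs.
	err := checkDuplicateDevices([]mount{
		{UUID: "zzzzz-ivpuk-0000000000keep0", DeviceID: "keep0-vol0", Service: "keep0"},
		{UUID: "zzzzz-ivpuk-0000000000keep1", DeviceID: "keep0-vol0", Service: "keep1"},
	})
	fmt.Println(err)
}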

commit 11864d817434e1f3e36cf3c0ef9ab37736938f65
Author: Tom Clegg <tom at curii.com>
Date:   Fri Dec 3 10:54:59 2021 -0500

    18547: Use volume UUID instead of DeviceID to deduplicate mounts.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keep-balance/balance.go b/services/keep-balance/balance.go
index bb590e13b..fa01d512b 100644
--- a/services/keep-balance/balance.go
+++ b/services/keep-balance/balance.go
@@ -217,8 +217,8 @@ func (bal *Balancer) cleanupMounts() {
 	rwdev := map[string]*KeepService{}
 	for _, srv := range bal.KeepServices {
 		for _, mnt := range srv.mounts {
-			if !mnt.ReadOnly && mnt.DeviceID != "" {
-				rwdev[mnt.DeviceID] = srv
+			if !mnt.ReadOnly {
+				rwdev[mnt.UUID] = srv
 			}
 		}
 	}
@@ -227,8 +227,8 @@ func (bal *Balancer) cleanupMounts() {
 	for _, srv := range bal.KeepServices {
 		var dedup []*KeepMount
 		for _, mnt := range srv.mounts {
-			if mnt.ReadOnly && rwdev[mnt.DeviceID] != nil {
-				bal.logf("skipping srv %s readonly mount %q because same device %q is mounted read-write on srv %s", srv, mnt.UUID, mnt.DeviceID, rwdev[mnt.DeviceID])
+			if mnt.ReadOnly && rwdev[mnt.UUID] != nil {
+				bal.logf("skipping srv %s readonly mount %q because same volume is mounted read-write on srv %s", srv, mnt.UUID, rwdev[mnt.UUID])
 			} else {
 				dedup = append(dedup, mnt)
 			}
@@ -357,12 +357,10 @@ func (bal *Balancer) GetCurrentState(ctx context.Context, c *arvados.Client, pag
 	deviceMount := map[string]*KeepMount{}
 	for _, srv := range bal.KeepServices {
 		for _, mnt := range srv.mounts {
-			equiv := deviceMount[mnt.DeviceID]
+			equiv := deviceMount[mnt.UUID]
 			if equiv == nil {
 				equiv = mnt
-				if mnt.DeviceID != "" {
-					deviceMount[mnt.DeviceID] = equiv
-				}
+				deviceMount[mnt.UUID] = equiv
 			}
 			equivMount[equiv] = append(equivMount[equiv], mnt)
 		}
@@ -667,7 +665,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
 				// new/remaining replicas uniformly
 				// across qualifying mounts on a given
 				// server.
-				return rendezvousLess(si.mnt.DeviceID, sj.mnt.DeviceID, blkid)
+				return rendezvousLess(si.mnt.UUID, sj.mnt.UUID, blkid)
 			}
 		})
 
@@ -692,7 +690,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
 		// and returns true if all requirements are met.
 		trySlot := func(i int) bool {
 			slot := slots[i]
-			if wantMnt[slot.mnt] || wantDev[slot.mnt.DeviceID] {
+			if wantMnt[slot.mnt] || wantDev[slot.mnt.UUID] {
 				// Already allocated a replica to this
 				// backend device, possibly on a
 				// different server.
@@ -707,9 +705,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
 				slots[i].want = true
 				wantSrv[slot.mnt.KeepService] = true
 				wantMnt[slot.mnt] = true
-				if slot.mnt.DeviceID != "" {
-					wantDev[slot.mnt.DeviceID] = true
-				}
+				wantDev[slot.mnt.UUID] = true
 				replWant += slot.mnt.Replication
 			}
 			return replProt >= desired && replWant >= desired
@@ -751,7 +747,7 @@ func (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) ba
 		// haven't already been added to unsafeToDelete
 		// because the servers report different Mtimes.
 		for _, slot := range slots {
-			if slot.repl != nil && wantDev[slot.mnt.DeviceID] {
+			if slot.repl != nil && wantDev[slot.mnt.UUID] {
 				unsafeToDelete[slot.repl.Mtime] = true
 			}
 		}
@@ -834,7 +830,7 @@ func computeBlockState(slots []slot, onlyCount map[*KeepMount]bool, have, needRe
 		if onlyCount != nil && !onlyCount[slot.mnt] {
 			continue
 		}
-		if countedDev[slot.mnt.DeviceID] {
+		if countedDev[slot.mnt.UUID] {
 			continue
 		}
 		switch {
@@ -848,9 +844,7 @@ func computeBlockState(slots []slot, onlyCount map[*KeepMount]bool, have, needRe
 			bbs.pulling++
 			repl += slot.mnt.Replication
 		}
-		if slot.mnt.DeviceID != "" {
-			countedDev[slot.mnt.DeviceID] = true
-		}
+		countedDev[slot.mnt.UUID] = true
 	}
 	if repl < needRepl {
 		bbs.unachievable = true
diff --git a/services/keep-balance/balance_test.go b/services/keep-balance/balance_test.go
index c529ac150..df04145b9 100644
--- a/services/keep-balance/balance_test.go
+++ b/services/keep-balance/balance_test.go
@@ -167,8 +167,8 @@ func (bal *balancerSuite) testMultipleViews(c *check.C, readonly bool) {
 		srv.mounts[0].KeepMount.DeviceID = fmt.Sprintf("writable-by-srv-%x", i)
 		srv.mounts = append(srv.mounts, &KeepMount{
 			KeepMount: arvados.KeepMount{
-				DeviceID:       fmt.Sprintf("writable-by-srv-%x", (i+1)%len(bal.srvs)),
-				UUID:           fmt.Sprintf("zzzzz-mount-%015x", i<<16),
+				DeviceID:       bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,
+				UUID:           bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,
 				ReadOnly:       readonly,
 				Replication:    1,
 				StorageClasses: map[string]bool{"default": true},
@@ -347,6 +347,7 @@ func (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {
 func (bal *balancerSuite) TestCleanupMounts(c *check.C) {
 	bal.srvs[3].mounts[0].KeepMount.ReadOnly = true
 	bal.srvs[3].mounts[0].KeepMount.DeviceID = "abcdef"
+	bal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID
 	bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
 	c.Check(len(bal.srvs[3].mounts), check.Equals, 1)
 	bal.cleanupMounts()
@@ -485,32 +486,32 @@ func (bal *balancerSuite) TestVolumeReplication(c *check.C) {
 }
 
 func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
-	bal.srvs[0].mounts[0].KeepMount.DeviceID = "abcdef"
-	bal.srvs[9].mounts[0].KeepMount.DeviceID = "abcdef"
-	bal.srvs[14].mounts[0].KeepMount.DeviceID = "abcdef"
+	dupUUID := bal.srvs[0].mounts[0].KeepMount.UUID
+	bal.srvs[9].mounts[0].KeepMount.UUID = dupUUID
+	bal.srvs[14].mounts[0].KeepMount.UUID = dupUUID
 	// block 0 belongs on servers 3 and e, which have different
-	// device IDs.
+	// UUIDs.
 	bal.try(c, tester{
 		known:      0,
 		desired:    map[string]int{"default": 2},
 		current:    slots{1},
 		shouldPull: slots{0}})
 	// block 1 belongs on servers 0 and 9, which both report
-	// having a replica, but the replicas are on the same device
-	// ID -- so we should pull to the third position (7).
+	// having a replica, but the replicas are on the same volume
+	// -- so we should pull to the third position (7).
 	bal.try(c, tester{
 		known:      1,
 		desired:    map[string]int{"default": 2},
 		current:    slots{0, 1},
 		shouldPull: slots{2}})
-	// block 1 can be pulled to the doubly-mounted device, but the
+	// block 1 can be pulled to the doubly-mounted volume, but the
 	// pull should only be done on the first of the two servers.
 	bal.try(c, tester{
 		known:      1,
 		desired:    map[string]int{"default": 2},
 		current:    slots{2},
 		shouldPull: slots{0}})
-	// block 0 has one replica on a single device mounted on two
+	// block 0 has one replica on a single volume mounted on two
 	// servers (e,9 at positions 1,9). Trashing the replica on 9
 	// would lose the block.
 	bal.try(c, tester{
@@ -523,7 +524,7 @@ func (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {
 			pulling: 1,
 		}})
 	// block 0 is overreplicated, but the second and third
-	// replicas are the same replica according to DeviceID
+	// replicas are the same replica according to volume UUID
 	// (despite different Mtimes). Don't trash the third replica.
 	bal.try(c, tester{
 		known:   0,
@@ -595,7 +596,7 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
 		desired:          map[string]int{"default": 2, "special": 1},
 		current:          slots{0, 1},
 		shouldPull:       slots{9},
-		shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+		shouldPullMounts: []string{"zzzzz-mount-special20000009"}})
 	// If some storage classes are not satisfied, don't trash any
 	// excess replicas. (E.g., if someone desires repl=1 on
 	// class=durable, and we have two copies on class=volatile, we
@@ -605,7 +606,7 @@ func (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {
 		desired:          map[string]int{"special": 1},
 		current:          slots{0, 1},
 		shouldPull:       slots{9},
-		shouldPullMounts: []string{"zzzzz-mount-special00000009"}})
+		shouldPullMounts: []string{"zzzzz-mount-special20000009"}})
 	// Once storage classes are satisfied, trash excess replicas
 	// that appear earlier in probe order but aren't needed to
 	// satisfy the desired classes.
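
The core change in this patch is that keep-balance now deduplicates replicas by the mount's UUID (which every mount has) rather than by its optional DeviceID, as seen in the countedDev change at the top of the diff. As a standalone illustration of that set-based counting, here is a rough Go sketch; the replica type and the countDistinctVolumes helper are illustrative names, not keep-balance's actual types.

    package main

    import "fmt"

    // replica is an illustrative stand-in for a keep-balance slot: a block
    // replica identified by the UUID of the volume (mount) that stores it.
    type replica struct {
        MountUUID string
        Server    string
    }

    // countDistinctVolumes counts replicas, treating replicas that share a
    // volume UUID as a single copy -- the same dedup rule the patch applies
    // by keying its countedDev set on mnt.UUID instead of mnt.DeviceID.
    func countDistinctVolumes(replicas []replica) int {
        counted := map[string]bool{}
        n := 0
        for _, r := range replicas {
            if counted[r.MountUUID] {
                continue
            }
            counted[r.MountUUID] = true
            n++
        }
        return n
    }

    func main() {
        rs := []replica{
            {MountUUID: "zzzzz-mount-000000000000001", Server: "srv0"},
            {MountUUID: "zzzzz-mount-000000000000001", Server: "srv9"}, // same volume, mounted by two servers
            {MountUUID: "zzzzz-mount-000000000000002", Server: "srv3"},
        }
        fmt.Println(countDistinctVolumes(rs)) // prints 2
    }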

commit b008c44eaf5c6b45c9f36116601918748aeb8323
Author: Tom Clegg <tom at curii.com>
Date:   Thu Nov 18 14:31:01 2021 -0500

    18376: Retry up to 4 times on EBADCOOKIE from readdir.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index 46f4db409..a053ba3e6 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -379,23 +379,25 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
 			continue
 		}
 		blockdirpath := filepath.Join(v.Root, subdir)
-		blockdir, err := v.os.Open(blockdirpath)
-		if err != nil {
-			v.logger.WithError(err).Errorf("error reading %q", blockdirpath)
-			return fmt.Errorf("error reading %q: %s", blockdirpath, err)
-		}
-		v.os.stats.TickOps("readdir")
-		v.os.stats.Tick(&v.os.stats.ReaddirOps)
-		// ReadDir() (compared to Readdir(), which returns
-		// FileInfo structs) helps complete the sequence of
-		// readdirent calls as quickly as possible, reducing
-		// the likelihood of NFS EBADCOOKIE (523) errors.
-		dirents, err := blockdir.ReadDir(-1)
-		blockdir.Close()
-		if err != nil {
-			v.logger.WithError(err).Errorf("error reading %q", blockdirpath)
-			return fmt.Errorf("error reading %q: %s", blockdirpath, err)
+
+		var dirents []os.DirEntry
+		for attempt := 0; ; attempt++ {
+			v.os.stats.TickOps("readdir")
+			v.os.stats.Tick(&v.os.stats.ReaddirOps)
+			dirents, err = os.ReadDir(blockdirpath)
+			if err == nil {
+				break
+			} else if attempt < 5 && strings.Contains(err.Error(), "errno 523") {
+				// EBADCOOKIE (NFS stopped accepting
+				// our readdirent cookie) -- retry a
+				// few times before giving up
+				v.logger.WithError(err).Printf("retry after error reading %s", blockdirpath)
+				continue
+			} else {
+				return err
+			}
 		}
+
 		for _, dirent := range dirents {
 			fileInfo, err := dirent.Info()
 			if os.IsNotExist(err) {
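
The retry loop above tolerates transient NFS EBADCOOKIE (errno 523) failures by re-reading the whole directory a few times before giving up. A minimal standalone Go sketch of the same pattern (not the keepstore volume code itself; the readDirWithRetry name and the maxRetries parameter are illustrative):

    package main

    import (
        "fmt"
        "log"
        "os"
        "strings"
    )

    // readDirWithRetry retries os.ReadDir a few times when the error text
    // mentions errno 523 (NFS EBADCOOKIE, i.e. the server stopped accepting
    // our readdirent cookie), and returns any other error immediately.
    func readDirWithRetry(dir string, maxRetries int) ([]os.DirEntry, error) {
        for attempt := 0; ; attempt++ {
            dirents, err := os.ReadDir(dir)
            if err == nil {
                return dirents, nil
            }
            if attempt < maxRetries && strings.Contains(err.Error(), "errno 523") {
                log.Printf("retry after error reading %s: %s", dir, err)
                continue
            }
            return nil, err
        }
    }

    func main() {
        dirents, err := readDirWithRetry("/tmp", 5)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(len(dirents), "directory entries")
    }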

commit 51c1bbb2f68e1046e8684985935fce932df08667
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Tue Nov 30 12:37:00 2021 -0300

    18491: Removes an unnecessary check that made the test fail on module upgrade.
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/lib/controller/localdb/container_gateway_test.go b/lib/controller/localdb/container_gateway_test.go
index 2a7735767..70037cc50 100644
--- a/lib/controller/localdb/container_gateway_test.go
+++ b/lib/controller/localdb/container_gateway_test.go
@@ -210,10 +210,9 @@ func (s *ContainerGatewaySuite) TestConnect(c *check.C) {
 		// Receive binary
 		_, err = io.ReadFull(sshconn.Conn, buf[:4])
 		c.Check(err, check.IsNil)
-		c.Check(buf[:4], check.DeepEquals, []byte{0, 0, 1, 0xfc})
 
 		// If we can get this far into an SSH handshake...
-		c.Log("success, tunnel is working")
+		c.Logf("was able to read %x -- success, tunnel is working", buf[:4])
 	}()
 	select {
 	case <-done:

commit 5c7b5f03de13e259ccedd2d09d5626c1eabdc934
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Mon Dec 6 13:45:32 2021 -0500

    github.com/Azure/go-autorest/autorest/azure/auth go-jwt security fix
    
    refs #18491
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/go.mod b/go.mod
index adca449b7..02e4f3e4e 100644
--- a/go.mod
+++ b/go.mod
@@ -5,8 +5,10 @@ go 1.13
 require (
 	github.com/AdRoll/goamz v0.0.0-20170825154802-2731d20f46f4
 	github.com/Azure/azure-sdk-for-go v45.1.0+incompatible
-	github.com/Azure/go-autorest/autorest v0.11.3
-	github.com/Azure/go-autorest/autorest/azure/auth v0.5.1
+	github.com/Azure/go-autorest/autorest v0.11.22
+	github.com/Azure/go-autorest/autorest/adal v0.9.17 // indirect
+	github.com/Azure/go-autorest/autorest/azure/auth v0.5.9
+	github.com/Azure/go-autorest/autorest/azure/cli v0.4.4 // indirect
 	github.com/Azure/go-autorest/autorest/to v0.4.0
 	github.com/Azure/go-autorest/autorest/validation v0.3.0 // indirect
 	github.com/Microsoft/go-winio v0.4.5 // indirect
@@ -33,6 +35,7 @@ require (
 	github.com/go-asn1-ber/asn1-ber v1.4.1 // indirect
 	github.com/go-ldap/ldap v3.0.3+incompatible
 	github.com/gogo/protobuf v1.1.1
+	github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/gorilla/context v1.1.1 // indirect
 	github.com/gorilla/mux v1.6.1-0.20180107155708-5bbbb5b2b572
@@ -58,10 +61,10 @@ require (
 	github.com/sirupsen/logrus v1.8.1
 	github.com/src-d/gcfg v1.3.0 // indirect
 	github.com/xanzy/ssh-agent v0.1.0 // indirect
-	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-	golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
+	golang.org/x/crypto v0.0.0-20211202192323-5770296d904e
+	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
-	golang.org/x/sys v0.0.0-20210603125802-9665404d3644
+	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1
 	golang.org/x/tools v0.1.2 // indirect
 	google.golang.org/api v0.13.0
 	gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
diff --git a/go.sum b/go.sum
index 2f575eae9..eea427da0 100644
--- a/go.sum
+++ b/go.sum
@@ -6,40 +6,39 @@ github.com/Azure/azure-sdk-for-go v45.1.0+incompatible h1:kxtaPD8n2z5Za+9e3sKsYG
 github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.3 h1:fyYnmYujkIXUgv88D9/Wo2ybE4Zwd/TmQd5sSI5u2Ws=
-github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
-github.com/Azure/go-autorest/autorest/adal v0.9.2 h1:Aze/GQeAN1RRbGmnUJvUj+tFGBzFdIg3293/A9rbxC4=
-github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.1 h1:bvUhZciHydpBxBmCheUgxxbSwJy7xcfjkUsjUcqSojc=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
+github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.22 h1:bXiQwDjrRmBQOE67bwlvUKAC1EU1yZTPQ38c+bstZws=
+github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.17 h1:esOPl2dhcz9P3jqBSJ8tPGEj2EqzPPT6zfyuloiogKY=
+github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.9 h1:Y2CgdzitFDsdMwYMzf9LIZWrrTFysqbRc7b94XVVJ78=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.4 h1:iuooz5cZL6VRcO7DVSFYxRcouqn6bFVE/e77Wts50Zk=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.4/go.mod h1:yAQ2b6eP/CmLPnmLvxtT1ALIY3OR1oFcCqVBi8vHiTc=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
 github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
 github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
 github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss=
 github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
-github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE=
-github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Microsoft/go-winio v0.4.5 h1:U2XsGR5dBg1yzwSEJoP2dE2/aAXpmad+CNG2hE9Pd5k=
 github.com/Microsoft/go-winio v0.4.5/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
@@ -60,13 +59,11 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
 github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092 h1:0Di2onNnlN5PAyWPbqlPyN45eOQ+QW/J9eqLynt4IV4=
 github.com/bradleypeabody/godap v0.0.0-20170216002349-c249933bc092/go.mod h1:8IzBjZCRSnsvM6MJMG8HNNtnzMl48H22rbJL2kRUJ0Y=
 github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=
 github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
-github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
@@ -77,10 +74,9 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v2.6.0-rc.1.0.20180105232752-277ed486c948+incompatible h1:PVtvnmmxSMUcT5AY6vG7sCCzRg3eyoW6vQvXtITC60c=
@@ -95,6 +91,7 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
@@ -104,40 +101,35 @@ github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aev
 github.com/go-asn1-ber/asn1-ber v1.4.1 h1:qP/QDxOtmMoJVgXHCXNzDpA0+wkgYB2x5QoLMVOciyw=
 github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk=
 github.com/go-ldap/ldap v3.0.3+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
-github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
+github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
@@ -164,21 +156,15 @@ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhB
 github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc h1:JJPhSHowepOF2+ElJVyb9jgt5ZyBkPMkPuhS0uODSFs=
 github.com/johannesboyne/gofakes3 v0.0.0-20200716060623-6b2b4cb092cc/go.mod h1:fNiSoOiEI5KlkWXn26OwKnNe58ilTIkpBlgOrt7Olu8=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5 h1:xXn0nBttYwok7DhU4RxqaADEpQn7fEMt5kKc3yoj/n0=
 github.com/kevinburke/ssh_config v0.0.0-20171013211458-802051befeb5/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
-github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
 github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4=
@@ -188,14 +174,11 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9 h1:ZivaaKmjs9q90zi6I4gTLW6tbVGtlBjellr3hMYaly0=
 github.com/msteinert/pam v0.0.0-20190215180659-f29b9f28d6f9/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -234,16 +217,13 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
 github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 h1:J6qvD6rbmOil46orKqJaRPG+zTpoGlBTUdyv8ki63L0=
 github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63/go.mod h1:n+VKSARF5y/tS9XFSP7vWDfS+GUC5vs/YT7M5XDTUEM=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
 github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
 github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -251,28 +231,22 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/xanzy/ssh-agent v0.1.0 h1:lOhdXLxtmYjaHc76ZtNmJWPg948y/RnT+3N3cvKWFzY=
 github.com/xanzy/ssh-agent v0.1.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
-github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
+golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -286,10 +260,10 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -299,9 +273,6 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -314,23 +285,20 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -340,8 +308,6 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
 golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -364,14 +330,12 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405 h1:829vOVxxusYHC+IqBtkX5mbKtsY9fheQiQn0MZRVLfQ=
 gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
 gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
 gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
 gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -385,7 +349,6 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
 gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 rsc.io/getopt v0.0.0-20170811000552-20be20937449 h1:UukjJOsjQH0DIuyyrcod6CXHS6cdaMMuJmrt+SN1j4A=
 rsc.io/getopt v0.0.0-20170811000552-20be20937449/go.mod h1:dhCdeqAxkyt5u3/sKRkUXuHaMXUu1Pt13GTQAM2xnig=

commit affe6b0a9bb79572dae80c5e3a3383fea66801eb
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Thu Dec 2 10:31:27 2021 -0500

    Merge branch '18511-java-sdk-groups-links' refs #18511
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java
index e9fbdb744..6482215b1 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Group.java
@@ -28,7 +28,7 @@ public class Group extends Item {
     private String groupClass;
     @JsonProperty("description")
     private String description;
-    @JsonProperty("writable_by")
+    @JsonProperty(value = "writable_by", access = JsonProperty.Access.WRITE_ONLY)
     private List<String> writableBy;
     @JsonProperty("delete_at")
     private LocalDateTime deleteAt;
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Link.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Link.java
index a24f02a01..1d1a20fc7 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/model/Link.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/Link.java
@@ -15,15 +15,19 @@ import com.fasterxml.jackson.annotation.JsonPropertyOrder;
 
 @JsonInclude(JsonInclude.Include.NON_NULL)
 @JsonIgnoreProperties(ignoreUnknown = true)
-@JsonPropertyOrder({ "name", "head_kind", "head_uuid", "link_class" })
+@JsonPropertyOrder({"name", "head_kind", "head_uuid", "link_class"})
 public class Link extends Item {
 
     @JsonProperty("name")
     private String name;
-    @JsonProperty("head_kind")
+    @JsonProperty(value = "head_kind", access = JsonProperty.Access.WRITE_ONLY)
     private String headKind;
     @JsonProperty("head_uuid")
     private String headUuid;
+    @JsonProperty("tail_uuid")
+    private String tailUuid;
+    @JsonProperty(value = "tail_kind", access = JsonProperty.Access.WRITE_ONLY)
+    private String tailKind;
     @JsonProperty("link_class")
     private String linkClass;
 
@@ -39,6 +43,14 @@ public class Link extends Item {
         return headUuid;
     }
 
+    public String getTailUuid() {
+        return tailUuid;
+    }
+
+    public String getTailKind() {
+        return tailKind;
+    }
+
     public String getLinkClass() {
         return linkClass;
     }
@@ -55,6 +67,14 @@ public class Link extends Item {
         this.headUuid = headUuid;
     }
 
+    public void setTailUuid(String tailUuid) {
+        this.tailUuid = tailUuid;
+    }
+
+    public void setTailKind(String tailKind) {
+        this.tailKind = tailKind;
+    }
+
     public void setLinkClass(String linkClass) {
         this.linkClass = linkClass;
     }
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/LinkApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/LinkApiClientTest.java
new file mode 100644
index 000000000..f051b5603
--- /dev/null
+++ b/sdk/java-v2/src/test/java/org/arvados/client/api/client/LinkApiClientTest.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.client;
+
+import okhttp3.mockwebserver.RecordedRequest;
+import org.arvados.client.api.model.Link;
+import org.arvados.client.api.model.LinkList;
+import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.arvados.client.test.utils.RequestMethod;
+import org.junit.Test;
+
+import static org.arvados.client.test.utils.ApiClientTestUtils.assertAuthorizationHeader;
+import static org.arvados.client.test.utils.ApiClientTestUtils.assertRequestMethod;
+import static org.arvados.client.test.utils.ApiClientTestUtils.assertRequestPath;
+import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
+
+public class LinkApiClientTest extends ArvadosClientMockedWebServerTest {
+
+    private static final String RESOURCE = "links";
+
+    private final LinksApiClient client = new LinksApiClient(CONFIG);
+
+    @Test
+    public void listLinks() throws Exception {
+        // given
+        server.enqueue(getResponse("links-list"));
+
+        // when
+        LinkList actual = client.list();
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertThat(actual.getItemsAvailable()).isEqualTo(2);
+    }
+
+    @Test
+    public void getLink() throws Exception {
+        // given
+        server.enqueue(getResponse("links-get"));
+
+        String uuid = "arkau-o0j2j-huxuaxbi46s1yml";
+
+        // when
+        Link actual = client.get(uuid);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE + "/" + uuid);
+        assertRequestMethod(request, RequestMethod.GET);
+        assertEquals(actual.getUuid(), uuid);
+        assertEquals(actual.getName(), "can_read");
+        assertEquals(actual.getHeadKind(), "arvados#group");
+        assertEquals(actual.getHeadUuid(), "arkau-j7d0g-fcedae2076pw56h");
+        assertEquals(actual.getTailUuid(), "ardev-tpzed-n3kzq4fvoks3uw4");
+        assertEquals(actual.getTailKind(), "arvados#user");
+        assertEquals(actual.getLinkClass(), "permission");
+    }
+
+    @Test
+    public void createLink() throws Exception {
+        // given
+        server.enqueue(getResponse("links-create"));
+
+        String name = "Star Link";
+
+        Link collection = new Link();
+        collection.setName(name);
+
+        // when
+        Link actual = client.create(collection);
+
+        // then
+        RecordedRequest request = server.takeRequest();
+        assertAuthorizationHeader(request);
+        assertRequestPath(request, RESOURCE);
+        assertRequestMethod(request, RequestMethod.POST);
+        assertThat(actual.getName()).isEqualTo(name);
+        assertEquals(actual.getName(), name);
+        assertEquals(actual.getUuid(), "arkau-o0j2j-huxuaxbi46s1yml");
+        assertEquals(actual.getHeadKind(), "arvados#group");
+        assertEquals(actual.getHeadUuid(), "arkau-j7d0g-fcedae2076pw56h");
+        assertEquals(actual.getTailUuid(), "ardev-tpzed-n3kzq4fvoks3uw4");
+        assertEquals(actual.getTailKind(), "arvados#user");
+        assertEquals(actual.getLinkClass(), "star");
+    }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-create.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-create.json
new file mode 100644
index 000000000..0664d886f
--- /dev/null
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-create.json
@@ -0,0 +1,18 @@
+{
+  "href": "/links/arkau-o0j2j-huxuaxbi46s1yml",
+  "kind": "arvados#link",
+  "etag": "zw1rlnbig0kpm9btw8us3pn9",
+  "uuid": "arkau-o0j2j-huxuaxbi46s1yml",
+  "owner_uuid": "arkau-tpzed-000000000000000",
+  "created_at": "2021-11-30T08:45:04.373354745Z",
+  "modified_by_client_uuid": null,
+  "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "modified_at": "2021-11-30T08:45:04.374489000Z",
+  "tail_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "link_class": "star",
+  "name": "Star Link",
+  "head_uuid": "arkau-j7d0g-fcedae2076pw56h",
+  "head_kind": "arvados#group",
+  "tail_kind": "arvados#user",
+  "properties": {}
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-get.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-get.json
new file mode 100644
index 000000000..25f63bda6
--- /dev/null
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-get.json
@@ -0,0 +1,18 @@
+{
+  "href": "/links/arkau-o0j2j-huxuaxbi46s1yml",
+  "kind": "arvados#link",
+  "etag": "zw1rlnbig0kpm9btw8us3pn9",
+  "uuid": "arkau-o0j2j-huxuaxbi46s1yml",
+  "owner_uuid": "arkau-tpzed-000000000000000",
+  "created_at": "2021-11-30T08:45:04.373354745Z",
+  "modified_by_client_uuid": null,
+  "modified_by_user_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "modified_at": "2021-11-30T08:45:04.374489000Z",
+  "tail_uuid": "ardev-tpzed-n3kzq4fvoks3uw4",
+  "link_class": "permission",
+  "name": "can_read",
+  "head_uuid": "arkau-j7d0g-fcedae2076pw56h",
+  "head_kind": "arvados#group",
+  "tail_kind": "arvados#user",
+  "properties": {}
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-list.json b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-list.json
new file mode 100644
index 000000000..e720ecf49
--- /dev/null
+++ b/sdk/java-v2/src/test/resources/org/arvados/client/api/client/links-list.json
@@ -0,0 +1,46 @@
+{
+  "kind": "arvados#linkList",
+  "etag": "",
+  "self_link": "",
+  "offset": 0,
+  "limit": 100,
+  "items": [
+    {
+      "href": "/links/arkau-o0j2j-x2b4rdadxs2fizn",
+      "kind": "arvados#link",
+      "etag": "dkhtr9tvp9zfy0d90xjn7w1t7",
+      "uuid": "arkau-o0j2j-x2b4rdadxs2fizn",
+      "owner_uuid": "arkau-j7d0g-publicfavorites",
+      "created_at": "2021-10-27T12:00:06.607794000Z",
+      "modified_by_client_uuid": null,
+      "modified_by_user_uuid": "arlog-tpzed-fyiau9qwo7ytntu",
+      "modified_at": "2021-10-27T12:00:06.609840000Z",
+      "tail_uuid": "arkau-j7d0g-publicfavorites",
+      "link_class": "star",
+      "name": "pRED Data Commons Service - Open access",
+      "head_uuid": "arkau-j7d0g-sfhw8b1uson0hwh",
+      "head_kind": "arvados#group",
+      "tail_kind": "arvados#group",
+      "properties": {}
+    },
+    {
+      "href": "/links/arkau-o0j2j-r5am4lz9gnu488k",
+      "kind": "arvados#link",
+      "etag": "9nt0c2xn5oz1jzjzawlycmehz",
+      "uuid": "arkau-o0j2j-r5am4lz9gnu488k",
+      "owner_uuid": "arkau-j7d0g-publicfavorites",
+      "created_at": "2021-06-23T14:58:06.189520000Z",
+      "modified_by_client_uuid": null,
+      "modified_by_user_uuid": "arlog-tpzed-xzjyeljl6co7vlz",
+      "modified_at": "2021-06-23T14:58:06.196208000Z",
+      "tail_uuid": "arkau-j7d0g-publicfavorites",
+      "link_class": "star",
+      "name": "Open Targets Genetics",
+      "head_uuid": "arkau-j7d0g-pj5wysmpy5wn8yo",
+      "head_kind": "arvados#group",
+      "tail_kind": "arvados#group",
+      "properties": {}
+    }
+  ],
+  "items_available": 2
+}
\ No newline at end of file
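
In the Java model changes above, @JsonProperty(access = WRITE_ONLY) marks head_kind and tail_kind as fields that are populated when a response is deserialized but never serialized into a request body, since the API computes them server-side. For readers more familiar with encoding/json, here is a rough Go analogue of that one-way mapping; the Link struct below is a simplified illustration, not the SDK model.

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
    )

    // Link is a simplified illustration of the SDK model: the "kind" fields
    // are filled in from API responses but must never be sent back in a
    // create/update request, because the API computes them server-side.
    type Link struct {
        Name      string `json:"name,omitempty"`
        HeadUUID  string `json:"head_uuid,omitempty"`
        TailUUID  string `json:"tail_uuid,omitempty"`
        LinkClass string `json:"link_class,omitempty"`
        HeadKind  string `json:"head_kind,omitempty"`
        TailKind  string `json:"tail_kind,omitempty"`
    }

    // MarshalJSON plays the role of Jackson's WRITE_ONLY access: it drops
    // the server-computed fields when encoding a request body, while the
    // default decoding still populates them from responses.
    func (l Link) MarshalJSON() ([]byte, error) {
        type request struct {
            Name      string `json:"name,omitempty"`
            HeadUUID  string `json:"head_uuid,omitempty"`
            TailUUID  string `json:"tail_uuid,omitempty"`
            LinkClass string `json:"link_class,omitempty"`
        }
        return json.Marshal(request{l.Name, l.HeadUUID, l.TailUUID, l.LinkClass})
    }

    func main() {
        resp := []byte(`{"name":"can_read","head_uuid":"arkau-j7d0g-fcedae2076pw56h","tail_uuid":"ardev-tpzed-n3kzq4fvoks3uw4","head_kind":"arvados#group","tail_kind":"arvados#user","link_class":"permission"}`)
        var l Link
        if err := json.Unmarshal(resp, &l); err != nil {
            log.Fatal(err)
        }
        out, err := json.Marshal(l)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(out)) // no head_kind/tail_kind in the request body
    }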

commit b97c5edf08a9dea2b447459909d3e08c8abdf82f
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Wed Dec 1 17:27:15 2021 -0500

    Merge branch '18490-redundant-updates' refs #18490
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/services/api/lib/update_permissions.rb b/services/api/lib/update_permissions.rb
index 23e60c8ed..b7e547640 100644
--- a/services/api/lib/update_permissions.rb
+++ b/services/api/lib/update_permissions.rb
@@ -118,6 +118,10 @@ as select * from compute_permission_subgraph($1, $2, $3, $4)
 
     ActiveRecord::Base.connection.exec_query "SET LOCAL enable_mergejoin to true;"
 
+    # Now that we have recomputed a set of permissions, delete any
+    # rows from the materialized_permissions table where (target_uuid,
+    # user_uuid) is not present or has perm_level=0 in the recomputed
+    # set.
     ActiveRecord::Base.connection.exec_delete %{
 delete from #{PERMISSION_VIEW} where
   target_uuid in (select target_uuid from #{temptable_perms}) and
@@ -128,10 +132,18 @@ delete from #{PERMISSION_VIEW} where
 },
                                               "update_permissions.delete"
 
+    # Now insert-or-update permissions in the recomputed set.  The
+    # WHERE clause is important to avoid redundantly updating rows
+    # that haven't actually changed.
     ActiveRecord::Base.connection.exec_query %{
 insert into #{PERMISSION_VIEW} (user_uuid, target_uuid, perm_level, traverse_owned)
   select user_uuid, target_uuid, val as perm_level, traverse_owned from #{temptable_perms} where val>0
-on conflict (user_uuid, target_uuid) do update set perm_level=EXCLUDED.perm_level, traverse_owned=EXCLUDED.traverse_owned;
+on conflict (user_uuid, target_uuid) do update
+set perm_level=EXCLUDED.perm_level, traverse_owned=EXCLUDED.traverse_owned
+where #{PERMISSION_VIEW}.user_uuid=EXCLUDED.user_uuid and
+      #{PERMISSION_VIEW}.target_uuid=EXCLUDED.target_uuid and
+       (#{PERMISSION_VIEW}.perm_level != EXCLUDED.perm_level or
+        #{PERMISSION_VIEW}.traverse_owned != EXCLUDED.traverse_owned);
 },
                                              "update_permissions.insert"
 

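The interesting part of this patch is the WHERE clause on the upsert: ON CONFLICT ... DO UPDATE normally rewrites the conflicting row even when nothing changes, so filtering on rows whose values actually differ avoids rewriting rows that are already correct. A minimal Go sketch of the same Postgres pattern, using database/sql and lib/pq against an illustrative table of the same shape (the connection string and table definition are assumptions, not the Arvados schema as deployed):

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/lib/pq"
    )

    // The WHERE clause on the DO UPDATE makes the upsert a no-op for rows
    // whose values are already correct, so unchanged rows are not rewritten.
    // Assumes a unique index on (user_uuid, target_uuid).
    const upsert = `
    INSERT INTO materialized_permissions (user_uuid, target_uuid, perm_level, traverse_owned)
    VALUES ($1, $2, $3, $4)
    ON CONFLICT (user_uuid, target_uuid) DO UPDATE
    SET perm_level = EXCLUDED.perm_level, traverse_owned = EXCLUDED.traverse_owned
    WHERE materialized_permissions.perm_level != EXCLUDED.perm_level
       OR materialized_permissions.traverse_owned != EXCLUDED.traverse_owned`

    func main() {
        db, err := sql.Open("postgres", "postgres://localhost/example?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        if _, err := db.Exec(upsert, "user-uuid", "target-uuid", 3, true); err != nil {
            log.Fatal(err)
        }
    }
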
commit dad25927277573fd93cc3f7308c4ebe015194b10
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Fri Nov 26 13:21:34 2021 -0300

    Merge branch '18480-arvput-special-files-handling' into main. Closes #18480
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/sdk/python/arvados/commands/put.py b/sdk/python/arvados/commands/put.py
index f6f85ba69..be7cd629c 100644
--- a/sdk/python/arvados/commands/put.py
+++ b/sdk/python/arvados/commands/put.py
@@ -576,6 +576,9 @@ class ArvPutUploadJob(object):
                     files.sort()
                     for f in files:
                         filepath = os.path.join(root, f)
+                        if not os.path.isfile(filepath):
+                            self.logger.warning("Skipping non-regular file '{}'".format(filepath))
+                            continue
                         # Add its size to the total bytes count (if applicable)
                         if self.follow_links or (not os.path.islink(filepath)):
                             if self.bytes_expected is not None:
diff --git a/sdk/python/tests/test_arv_put.py b/sdk/python/tests/test_arv_put.py
index 2a71f3671..0e531dee3 100644
--- a/sdk/python/tests/test_arv_put.py
+++ b/sdk/python/tests/test_arv_put.py
@@ -14,10 +14,10 @@ from functools import partial
 import apiclient
 import ciso8601
 import datetime
-import hashlib
 import json
 import logging
 import mock
+import multiprocessing
 import os
 import pwd
 import random
@@ -31,7 +31,6 @@ import tempfile
 import time
 import unittest
 import uuid
-import yaml
 
 import arvados
 import arvados.commands.put as arv_put
@@ -294,6 +293,26 @@ class ArvPutUploadJobTest(run_test_server.TestCaseWithServers,
         shutil.rmtree(self.small_files_dir)
         shutil.rmtree(self.tempdir_with_symlink)
 
+    def test_non_regular_files_are_ignored_except_symlinks_to_dirs(self):
+        def pfunc(x):
+            with open(x, 'w') as f:
+                f.write('test')
+        fifo_filename = 'fifo-file'
+        fifo_path = os.path.join(self.tempdir_with_symlink, fifo_filename)
+        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
+        os.mkfifo(fifo_path)
+        producer = multiprocessing.Process(target=pfunc, args=(fifo_path,))
+        producer.start()
+        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])
+        cwriter.start(save_collection=False)
+        if producer.exitcode is None:
+            # If the producer is still running, kill it. This should always be
+            # before any assertion that may fail.
+            producer.terminate()
+            producer.join(1)
+        self.assertIn('linkeddir', cwriter.manifest_text())
+        self.assertNotIn(fifo_filename, cwriter.manifest_text())
+
     def test_symlinks_are_followed_by_default(self):
         self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))
         self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))
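
The fix above makes arv-put skip anything that is not a regular file; the new test creates a FIFO and checks that it never reaches the manifest. For comparison, the same check expressed as a standalone Go sketch. The walkRegularFiles helper is hypothetical, and unlike the Python code this sketch does not implement the separate follow_links handling (symlinks are simply skipped).

    package main

    import (
        "fmt"
        "io/fs"
        "log"
        "path/filepath"
    )

    // walkRegularFiles visits only regular files under root, logging and
    // skipping anything else (FIFOs, sockets, device nodes, symlinks).
    func walkRegularFiles(root string, visit func(path string)) error {
        return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
            if err != nil {
                return err
            }
            if d.IsDir() {
                return nil // descend into directories
            }
            if !d.Type().IsRegular() {
                log.Printf("Skipping non-regular file %q", path)
                return nil
            }
            visit(path)
            return nil
        })
    }

    func main() {
        err := walkRegularFiles(".", func(path string) { fmt.Println(path) })
        if err != nil {
            log.Fatal(err)
        }
    }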

commit a0b4442a02e4767ee63f03bc355538f16fffccb1
Author: Tom Clegg <tom at curii.com>
Date:   Tue Nov 30 10:57:08 2021 -0500

    18488: Close pg connection when waiting for lock.
    
    In practice, pg_advisory_lock() calls pile up and consume database
    connection slots, even when the corresponding processes have exited.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/lib/controller/dblock/dblock.go b/lib/controller/dblock/dblock.go
index b0d348870..1a36822d5 100644
--- a/lib/controller/dblock/dblock.go
+++ b/lib/controller/dblock/dblock.go
@@ -35,8 +35,8 @@ func (dbl *DBLocker) Lock(ctx context.Context, getdb func(context.Context) (*sql
 	for ; ; time.Sleep(retryDelay) {
 		dbl.mtx.Lock()
 		if dbl.conn != nil {
-			// Already locked by another caller in this
-			// process. Wait for them to release.
+			// Another goroutine is already locked/waiting
+			// on this lock. Wait for them to release.
 			dbl.mtx.Unlock()
 			continue
 		}
@@ -52,9 +52,15 @@ func (dbl *DBLocker) Lock(ctx context.Context, getdb func(context.Context) (*sql
 			dbl.mtx.Unlock()
 			continue
 		}
-		_, err = conn.ExecContext(ctx, `SELECT pg_advisory_lock($1)`, dbl.key)
+		var locked bool
+		err = conn.QueryRowContext(ctx, `SELECT pg_try_advisory_lock($1)`, dbl.key).Scan(&locked)
 		if err != nil {
-			logger.WithError(err).Infof("error getting pg_advisory_lock %d", dbl.key)
+			logger.WithError(err).Infof("error getting pg_try_advisory_lock %d", dbl.key)
+			conn.Close()
+			dbl.mtx.Unlock()
+			continue
+		}
+		if !locked {
 			conn.Close()
 			dbl.mtx.Unlock()
 			continue
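
The effect of this change is that a process waiting for the lock no longer parks inside pg_advisory_lock() while holding a server connection slot; instead it polls pg_try_advisory_lock() and closes its connection between attempts. A standalone Go sketch of that polling pattern (the connection string, retry interval, and lockWithPolling helper are illustrative; the real code lives in lib/controller/dblock):

    package main

    import (
        "context"
        "database/sql"
        "log"
        "time"

        _ "github.com/lib/pq"
    )

    // lockWithPolling keeps trying until it holds advisory lock `key`, and
    // returns the connection that holds it. The caller must keep that
    // connection open while it needs the lock, and release the lock with
    // pg_advisory_unlock() when done.
    func lockWithPolling(ctx context.Context, db *sql.DB, key int64) (*sql.Conn, error) {
        for {
            conn, err := db.Conn(ctx)
            if err != nil {
                return nil, err
            }
            var locked bool
            err = conn.QueryRowContext(ctx, `SELECT pg_try_advisory_lock($1)`, key).Scan(&locked)
            if err == nil && locked {
                return conn, nil
            }
            // Not acquired (or the query failed): give the connection back
            // instead of sitting inside pg_advisory_lock(), then retry.
            conn.Close()
            if err != nil {
                log.Printf("pg_try_advisory_lock(%d): %s", key, err)
            }
            select {
            case <-ctx.Done():
                return nil, ctx.Err()
            case <-time.After(5 * time.Second):
            }
        }
    }

    func main() {
        db, err := sql.Open("postgres", "postgres://localhost/example?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        ctx := context.Background()
        conn, err := lockWithPolling(ctx, db, 10001)
        if err != nil {
            log.Fatal(err)
        }
        log.Println("advisory lock held")
        // ... do the work that needs the lock ...
        if _, err := conn.ExecContext(ctx, `SELECT pg_advisory_unlock($1)`, int64(10001)); err != nil {
            log.Print(err)
        }
        conn.Close()
    }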

commit 9e1fd027953d2d25f395144057dc9b95750a13f9
Author: Ward Vandewege <ward at curii.com>
Date:   Wed Dec 1 15:48:54 2021 -0500

    Documentation: fix AWS region name in the configuration reference and on
    the S3 configuration page.
    
    No issue #
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/install/configure-s3-object-storage.html.textile.liquid b/doc/install/configure-s3-object-storage.html.textile.liquid
index e6b1e095e..e9866d510 100644
--- a/doc/install/configure-s3-object-storage.html.textile.liquid
+++ b/doc/install/configure-s3-object-storage.html.textile.liquid
@@ -48,7 +48,7 @@ Volumes are configured in the @Volumes@ section of the cluster configuration fil
 
           # Storage provider region. For Google Cloud Storage, use ""
           # or omit.
-          Region: <span class="userinput">us-east-1a</span>
+          Region: <span class="userinput">us-east-1</span>
 
           # Storage provider endpoint. For Amazon S3, use "" or
           # omit. For Google Cloud Storage, use
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 4e5fa705f..3a02308e5 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -1326,7 +1326,7 @@ Clusters:
           AccessKeyID: aaaaa
           SecretAccessKey: aaaaa
           Endpoint: ""
-          Region: us-east-1a
+          Region: us-east-1
           Bucket: aaaaa
           LocationConstraint: false
           V2Signature: false
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index b82d94809..bf7306ace 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -1332,7 +1332,7 @@ Clusters:
           AccessKeyID: aaaaa
           SecretAccessKey: aaaaa
           Endpoint: ""
-          Region: us-east-1a
+          Region: us-east-1
           Bucket: aaaaa
           LocationConstraint: false
           V2Signature: false

commit c13a243a950af570fba7b89e690d85f1004de20d
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Wed Nov 24 09:36:19 2021 -0500

    Update provision.sh VERSION=2.3.1-1 refs #18361
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 6a5cc9ee5..194a62a8a 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -175,7 +175,7 @@ CUSTOM_CERTS_DIR="./certs"
 # The "local.params.example.*" files already set "RELEASE=production"
 # to deploy  production-ready packages
 RELEASE="production"
-VERSION="2.3.0-1"
+VERSION="2.3.1-1"
 
 # These are arvados-formula-related parameters
 # An arvados-formula tag. For a stable release, this should be a

commit 5484ed74fd1721568841ba13f392c8382ac9e46a
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Wed Nov 24 09:28:56 2021 -0500

    Update arvbox DEFAULT_TAG to 2.3.1 refs #18361
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/tools/arvbox/bin/arvbox b/tools/arvbox/bin/arvbox
index 5afafa313..7aefe73d1 100755
--- a/tools/arvbox/bin/arvbox
+++ b/tools/arvbox/bin/arvbox
@@ -61,7 +61,7 @@ if test -z "$WORKBENCH2_BRANCH" ; then
 fi
 
 # Update this to the docker tag for the version on releases.
-DEFAULT_TAG=2.3.0
+DEFAULT_TAG=2.3.1
 
 PG_DATA="$ARVBOX_DATA/postgres"
 VAR_DATA="$ARVBOX_DATA/var"

commit dc70662c5d9cfd7f5cb05b9540a7bb2825de6bff
Author: Ward Vandewege <ward at jhvc.com>
Date:   Tue Nov 2 12:53:31 2021 -0400

    Merge branch '18313-arvbox-bootstrap-go-version'
    
    closes #18313
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/lib/install/deps.go b/lib/install/deps.go
index ff00ee1e3..714604c84 100644
--- a/lib/install/deps.go
+++ b/lib/install/deps.go
@@ -29,6 +29,7 @@ import (
 var Command cmd.Handler = &installCommand{}
 
 const devtestDatabasePassword = "insecure_arvados_test"
+const goversion = "1.17.1"
 
 type installCommand struct {
 	ClusterType    string
@@ -245,7 +246,6 @@ make install
 	}
 
 	if !prod {
-		goversion := "1.17.1"
 		if havegoversion, err := exec.Command("/usr/local/bin/go", "version").CombinedOutput(); err == nil && bytes.HasPrefix(havegoversion, []byte("go version go"+goversion+" ")) {
 			logger.Print("go " + goversion + " already installed")
 		} else {
diff --git a/lib/install/deps_go_version_test.go b/lib/install/deps_go_version_test.go
new file mode 100644
index 000000000..1a69b6e61
--- /dev/null
+++ b/lib/install/deps_go_version_test.go
@@ -0,0 +1,40 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package install
+
+import (
+	"bytes"
+	"os/exec"
+	"testing"
+
+	"gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+	check.TestingT(t)
+}
+
+var _ = check.Suite(&Suite{})
+
+type Suite struct{}
+
+/*
+	TestExtractGoVersion tests the grep/awk command used in
+	tools/arvbox/bin/arvbox to extract the version of Go to install for
+	bootstrapping `arvados-server`.
+
+	If this test is changed, the arvbox code will also need to be updated.
+*/
+func (*Suite) TestExtractGoVersion(c *check.C) {
+	script := `
+  sourcepath="$(realpath ../..)"
+  (cd ${sourcepath} && grep 'const goversion =' lib/install/deps.go |awk -F'"' '{print $2}')
+	`
+	cmd := exec.Command("bash", "-")
+	cmd.Stdin = bytes.NewBufferString("set -ex -o pipefail\n" + script)
+	cmdOutput, err := cmd.Output()
+	c.Assert(err, check.IsNil)
+	c.Assert(string(cmdOutput), check.Equals, goversion+"\n")
+}
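
For comparison with the grep/awk pipeline the test exercises, the same extraction can be written in pure Go. The sketch below is illustrative only; the path to deps.go is an assumption and would normally be resolved relative to the repository root:

// Illustrative sketch only.
package sketch

import (
	"fmt"
	"os"
	"regexp"
)

var goversionRe = regexp.MustCompile(`const goversion = "([^"]+)"`)

// extractGoVersion returns the quoted value of the goversion constant
// declared in the given Go source file, e.g. lib/install/deps.go.
func extractGoVersion(path string) (string, error) {
	src, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	m := goversionRe.FindSubmatch(src)
	if m == nil {
		return "", fmt.Errorf("no goversion constant found in %s", path)
	}
	return string(m[1]), nil
}
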
diff --git a/tools/arvbox/bin/arvbox b/tools/arvbox/bin/arvbox
index 7ae11cf5d..5afafa313 100755
--- a/tools/arvbox/bin/arvbox
+++ b/tools/arvbox/bin/arvbox
@@ -400,6 +400,9 @@ build() {
     fi
     set -e
 
+    # Get the go version we should use for bootstrapping
+    GO_VERSION=`grep 'const goversion =' $LOCAL_ARVADOS_ROOT/lib/install/deps.go |awk -F'"' '{print $2}'`
+
     if test "$1" = localdemo -o "$1" = publicdemo ; then
         BUILDTYPE=demo
     else
@@ -411,6 +414,7 @@ build() {
     fi
 
     docker build --build-arg=BUILDTYPE=$BUILDTYPE $NO_CACHE \
+	   --build-arg=go_version=$GO_VERSION \
 	   --build-arg=arvados_version=$ARVADOS_BRANCH \
 	   --build-arg=workbench2_version=$WORKBENCH2_BRANCH \
 	   --build-arg=workdir=/tools/arvbox/lib/arvbox/docker \
@@ -419,6 +423,7 @@ build() {
 	   "$LOCAL_ARVADOS_ROOT"
     docker tag $FORCE arvados/arvbox-base:$GITHEAD arvados/arvbox-base:latest
     docker build $NO_CACHE \
+	   --build-arg=go_version=$GO_VERSION \
 	   --build-arg=arvados_version=$ARVADOS_BRANCH \
 	   --build-arg=workbench2_version=$WORKBENCH2_BRANCH \
 	   -t arvados/arvbox-$BUILDTYPE:$GITHEAD \
diff --git a/tools/arvbox/lib/arvbox/docker/Dockerfile.base b/tools/arvbox/lib/arvbox/docker/Dockerfile.base
index 27757be64..c93c1a10a 100644
--- a/tools/arvbox/lib/arvbox/docker/Dockerfile.base
+++ b/tools/arvbox/lib/arvbox/docker/Dockerfile.base
@@ -21,15 +21,16 @@ RUN apt-get update && \
     build-essential ca-certificates git libpam0g-dev wget
 
 ENV GOPATH /var/lib/gopath
+ARG go_version
 
-# Get Go 1.16.9
+# Get Go
 RUN cd /usr/src && \
-    wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
-    tar xzf go1.16.9.linux-amd64.tar.gz && \
-    ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
-    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
-    ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
-    ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+    wget https://golang.org/dl/go${go_version}.linux-amd64.tar.gz && \
+    tar xzf go${go_version}.linux-amd64.tar.gz && \
+    ln -s /usr/src/go/bin/go /usr/local/bin/go-${go_version} && \
+    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-${go_version} && \
+    ln -s /usr/local/bin/go-${go_version} /usr/local/bin/go && \
+    ln -s /usr/local/bin/gofmt-${go_version} /usr/local/bin/gofmt
 
 # the --mount option requires the experimental syntax enabled (enables
 # buildkit) on the first line of this file. This Dockerfile must also be built
@@ -49,15 +50,15 @@ RUN apt-get update && \
     build-essential ca-certificates git libpam0g-dev wget
 
 ENV GOPATH /var/lib/gopath
+ARG go_version
 
-# Get Go 1.16.9
 RUN cd /usr/src && \
-    wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
-    tar xzf go1.16.9.linux-amd64.tar.gz && \
-    ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
-    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
-    ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
-    ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+    wget https://golang.org/dl/go${go_version}.linux-amd64.tar.gz && \
+    tar xzf go${go_version}.linux-amd64.tar.gz && \
+    ln -s /usr/src/go/bin/go /usr/local/bin/go-${go_version} && \
+    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-${go_version} && \
+    ln -s /usr/local/bin/go-${go_version} /usr/local/bin/go && \
+    ln -s /usr/local/bin/gofmt-${go_version} /usr/local/bin/gofmt
 
 ARG arvados_version
 RUN echo arvados_version is git commit $arvados_version

commit eb5b089f8e296b2b9575fa8fbb59724508e9342b
Author: Ward Vandewege <ward at curii.com>
Date:   Fri Oct 29 09:29:29 2021 -0400

    We now rely on Go 1.16. Unbreak the arvbox build toolchain by switching
    to upstream Golang; the buster-backports version is too old.
    
    No issue #
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/tools/arvbox/lib/arvbox/docker/Dockerfile.base b/tools/arvbox/lib/arvbox/docker/Dockerfile.base
index 455665256..27757be64 100644
--- a/tools/arvbox/lib/arvbox/docker/Dockerfile.base
+++ b/tools/arvbox/lib/arvbox/docker/Dockerfile.base
@@ -16,17 +16,21 @@ ARG BUILDTYPE
 FROM debian:10-slim as dev
 ENV DEBIAN_FRONTEND noninteractive
 
-RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list
-
 RUN apt-get update && \
     apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
-    golang -t buster-backports
-
-RUN apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
-    build-essential ca-certificates git libpam0g-dev
+    build-essential ca-certificates git libpam0g-dev wget
 
 ENV GOPATH /var/lib/gopath
 
+# Get Go 1.16.9
+RUN cd /usr/src && \
+    wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
+    tar xzf go1.16.9.linux-amd64.tar.gz && \
+    ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
+    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
+    ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
+    ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+
 # the --mount option requires the experimental syntax enabled (enables
 # buildkit) on the first line of this file. This Dockerfile must also be built
 # with the DOCKER_BUILDKIT=1 environment variable set.
@@ -40,17 +44,21 @@ RUN --mount=type=bind,target=/usr/src/arvados \
 FROM debian:10-slim as demo
 ENV DEBIAN_FRONTEND noninteractive
 
-RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/backports.list
-
 RUN apt-get update && \
     apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
-    golang -t buster-backports
-
-RUN apt-get -yq --no-install-recommends -o Acquire::Retries=6 install \
-    build-essential ca-certificates git libpam0g-dev
+    build-essential ca-certificates git libpam0g-dev wget
 
 ENV GOPATH /var/lib/gopath
 
+# Get Go 1.16.9
+RUN cd /usr/src && \
+    wget https://golang.org/dl/go1.16.9.linux-amd64.tar.gz && \
+    tar xzf go1.16.9.linux-amd64.tar.gz && \
+    ln -s /usr/src/go/bin/go /usr/local/bin/go-1.16.9 && \
+    ln -s /usr/src/go/bin/gofmt /usr/local/bin/gofmt-1.16.9 && \
+    ln -s /usr/local/bin/go-1.16.9 /usr/local/bin/go && \
+    ln -s /usr/local/bin/gofmt-1.16.9 /usr/local/bin/gofmt
+
 ARG arvados_version
 RUN echo arvados_version is git commit $arvados_version
 

commit 88648f2b3f9e85fe4094af7d2805439c1a99b9de
Author: Javier Bértoli <jbertoli at curii.com>
Date:   Thu Oct 21 16:00:13 2021 -0300

    Merge branch '17742-provide-custom-certs'
    
    closes #17742
    Arvados-DCO-1.1-Signed-off-by: Javier Bértoli <jbertoli at curii.com>

diff --git a/doc/_includes/_install_custom_certificates.liquid b/doc/_includes/_install_custom_certificates.liquid
new file mode 100644
index 000000000..74bc009b8
--- /dev/null
+++ b/doc/_includes/_install_custom_certificates.liquid
@@ -0,0 +1,26 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+If you plan to use custom certificates, set the variable <i>USE_LETSENCRYPT=no</i> and copy your certificates into the directory specified by the variable @CUSTOM_CERTS_DIR@ (usually "./certs") inside the remote directory where you copied the @provision.sh@ script. From that directory, the provision script will install the certificates required for the role you're installing.
+
+The script expects cert/key files with these basenames (matching the role name, except for <i>keepweb</i>, which is split into <i>download</i> and <i>collections</i>):
+
+* "controller"
+* "websocket"
+* "workbench"
+* "workbench2"
+* "webshell"
+* "download"         # Part of keepweb
+* "collections"      # Part of keepweb
+* "keepproxy"
+
+For example, for 'keepproxy', the script will look for
+
+<notextile>
+<pre><code>${CUSTOM_CERTS_DIR}/keepproxy.crt
+${CUSTOM_CERTS_DIR}/keepproxy.key
+</code></pre>
+</notextile>
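
As a hedged illustration of that naming convention, the sketch below checks that every expected .crt/.key pair is present before provision.sh is run. The role list mirrors the basenames above; the "./certs" default and the function itself are hypothetical, not part of the installer:

// Illustrative sketch only.
package sketch

import (
	"fmt"
	"os"
	"path/filepath"
)

var certRoles = []string{
	"controller", "websocket", "workbench", "workbench2",
	"webshell", "download", "collections", "keepproxy",
}

// checkCustomCerts reports the first missing cert or key file under dir,
// e.g. checkCustomCerts("./certs").
func checkCustomCerts(dir string) error {
	for _, role := range certRoles {
		for _, ext := range []string{".crt", ".key"} {
			p := filepath.Join(dir, role+ext)
			if _, err := os.Stat(p); err != nil {
				return fmt.Errorf("missing %s: %w", p, err)
			}
		}
	}
	return nil
}
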
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index fdfd05ead..e497240c4 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -97,7 +97,9 @@ cp -r config_examples/multi_host/aws local_config_dir
 
 Edit the variables in the <i>local.params</i> file. Pay attention to the <b>*_INT_IP, *_TOKEN</b> and <b>*KEY</b> variables. Those variables will be used to do a search and replace on the <i>pillars/*</i> in place of any matching __VARIABLE__.
 
-The <i>multi_host</i> include LetsEncrypt salt code to automatically request and install the certificates for the public-facing hosts (API/controller, Workbench, Keepproxy/Keepweb) using AWS' Route53. If you will provide custom certificates, please set the variable <i>USE_LETSENCRYPT=no</i>.
+The <i>multi_host</i> example includes Let's Encrypt salt code to automatically request and install the certificates for the public-facing hosts (API/controller, Workbench, Keepproxy/Keepweb) using AWS' Route53.
+
+{% include 'install_custom_certificates' %}
 
 h3(#further_customization). Further customization of the installation (modifying the salt pillars and states)
 
diff --git a/doc/install/salt-single-host.html.textile.liquid b/doc/install/salt-single-host.html.textile.liquid
index 6ca6738e3..9147f25a1 100644
--- a/doc/install/salt-single-host.html.textile.liquid
+++ b/doc/install/salt-single-host.html.textile.liquid
@@ -55,6 +55,12 @@ cp -r config_examples/single_host/single_hostname local_config_dir
 
 Edit the variables in the <i>local.params</i> file. Pay attention to the <b>*_PORT, *_TOKEN</b> and <b>*KEY</b> variables.
 
+The <i>single_host</i> examples use self-signed SSL certificates, which are deployed with the same mechanism as custom certificates.
+
+{% include 'install_custom_certificates' %}
+
+If you want to use valid certificates provided by Let's Encrypt, please set the variable <i>USE_LETSENCRYPT=yes</i> and make sure that all the FQDNs that you will use for the public-facing applications (API/controller, Workbench, Keepproxy/Keepweb) are reachable.
+
 h3(#single_host_multiple_hostnames). Single host / multiple hostnames (Alternative configuration)
 <notextile>
 <pre><code>cp local.params.example.single_host_multiple_hostnames local.params
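
Both guides have the installer edit local.params and then run provision.sh, which substitutes those values into the pillar and state templates wherever a matching __VARIABLE__ token appears. The sketch below mimics that substitution step; the directory layout, file glob, and variable map are hypothetical, not the script's actual implementation:

// Illustrative sketch only.
package sketch

import (
	"os"
	"path/filepath"
	"strings"
)

// substitutePillars rewrites every *.sls file under dir, replacing tokens
// such as __CLUSTER__ or __CONTROLLER_EXT_SSL_PORT__ with values from vars,
// e.g. map[string]string{"CLUSTER": "harpo", "CONTROLLER_EXT_SSL_PORT": "8443"}.
func substitutePillars(dir string, vars map[string]string) error {
	paths, err := filepath.Glob(filepath.Join(dir, "*.sls"))
	if err != nil {
		return err
	}
	for _, p := range paths {
		src, err := os.ReadFile(p)
		if err != nil {
			return err
		}
		out := string(src)
		for name, value := range vars {
			out = strings.ReplaceAll(out, "__"+name+"__", value)
		}
		if err := os.WriteFile(p, []byte(out), 0o644); err != nil {
			return err
		}
	}
	return nil
}
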
diff --git a/tools/salt-install/Vagrantfile b/tools/salt-install/Vagrantfile
index a3463bfc5..f7f8da3b1 100644
--- a/tools/salt-install/Vagrantfile
+++ b/tools/salt-install/Vagrantfile
@@ -35,14 +35,18 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
                                cp -vr /vagrant/tests /home/vagrant/tests;
                                sed 's#cluster_fixme_or_this_wont_work#harpo#g;
                                     s#domain_fixme_or_this_wont_work#local#g;
-                                    s/#\ BRANCH=\"main\"/\ BRANCH=\"main\"/g;
-                                    s#CONTROLLER_EXT_SSL_PORT=443#CONTROLLER_EXT_SSL_PORT=8443#g' \
+                                    s#CONTROLLER_EXT_SSL_PORT=443#CONTROLLER_EXT_SSL_PORT=8443#g;
+                                    s#RELEASE=\"production\"#RELEASE=\"development\"#g;
+                                    s/# VERSION=.*$/VERSION=\"latest\"/g;
+                                    s/#\ BRANCH=\"main\"/\ BRANCH=\"main\"/g' \
                                     /vagrant/local.params.example.single_host_multiple_hostnames > /tmp/local.params.single_host_multiple_hostnames"
+
      arv.vm.provision "shell",
                       path: "provision.sh",
                       args: [
                         # "--debug",
                         "--config /tmp/local.params.single_host_multiple_hostnames",
+                        "--development",
                         "--test",
                         "--vagrant"
                       ].join(" ")
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls
index 68c8512e7..1f088a8a7 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls
@@ -6,13 +6,5 @@
 ### LETSENCRYPT
 letsencrypt:
   domainsets:
-    __CLUSTER__.__DOMAIN__:
+    controller.__CLUSTER__.__DOMAIN__:
       - __CLUSTER__.__DOMAIN__
-
-### NGINX
-nginx:
-  ### SNIPPETS
-  snippets:
-    __CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls
index 3056b89d4..b2945e611 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls
@@ -6,13 +6,5 @@
 ### LETSENCRYPT
 letsencrypt:
   domainsets:
-    keep.__CLUSTER__.__DOMAIN__:
+    keepproxy.__CLUSTER__.__DOMAIN__:
       - keep.__CLUSTER__.__DOMAIN__
-
-### NGINX
-nginx:
-  ### SNIPPETS
-  snippets:
-    keep.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/keep.__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/keep.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls
index c1720ad04..f95d7e619 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls
@@ -11,14 +11,3 @@ letsencrypt:
     collections.__CLUSTER__.__DOMAIN__:
       - collections.__CLUSTER__.__DOMAIN__
       - '*.collections.__CLUSTER__.__DOMAIN__'
-
-### NGINX
-nginx:
-  ### SNIPPETS
-  snippets:
-    download.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/download.__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/download.__CLUSTER__.__DOMAIN__/privkey.pem
-    collections.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/collections.__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/collections.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls
index e9d2bb018..17e6422f4 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls
@@ -8,11 +8,3 @@ letsencrypt:
   domainsets:
     webshell.__CLUSTER__.__DOMAIN__:
       - webshell.__CLUSTER__.__DOMAIN__
-
-### NGINX
-nginx:
-  ### SNIPPETS
-  snippets:
-    webshell.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/webshell.__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/webshell.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls
index d24431fac..6515b3bd0 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls
@@ -6,13 +6,5 @@
 ### LETSENCRYPT
 letsencrypt:
   domainsets:
-    ws.__CLUSTER__.__DOMAIN__:
+    websocket.__CLUSTER__.__DOMAIN__:
       - ws.__CLUSTER__.__DOMAIN__
-
-### NGINX
-nginx:
-  ### SNIPPETS
-  snippets:
-    ws.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/ws.__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/ws.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls
index 5aa634286..2bcf2b784 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls
@@ -8,11 +8,3 @@ letsencrypt:
   domainsets:
     workbench2.__CLUSTER__.__DOMAIN__:
       - workbench2.__CLUSTER__.__DOMAIN__
-
-### NGINX
-nginx:
-  ### SNIPPETS
-  snippets:
-    workbench2.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/workbench2.__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/workbench2.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls
index 4620f79e3..9ef348719 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls
@@ -8,11 +8,3 @@ letsencrypt:
   domainsets:
     workbench.__CLUSTER__.__DOMAIN__:
       - workbench.__CLUSTER__.__DOMAIN__
-
-### NGINX
-nginx:
-  ### SNIPPETS
-  snippets:
-    workbench.__CLUSTER__.__DOMAIN___letsencrypt_cert.conf:
-      - ssl_certificate: /etc/letsencrypt/live/workbench.__CLUSTER__.__DOMAIN__/fullchain.pem
-      - ssl_certificate_key: /etc/letsencrypt/live/workbench.__CLUSTER__.__DOMAIN__/privkey.pem
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls
index c0b087045..9fbf90dd2 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls
@@ -13,7 +13,7 @@ nginx:
   ### SITES
   servers:
     managed:
-      arvados_api:
+      arvados_api.conf:
         enabled: true
         overwrite: true
         config:
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_collections_configuration.sls
similarity index 65%
copy from tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
copy to tools/salt-install/config_examples/multi_host/aws/pillars/nginx_collections_configuration.sls
index 9ea16bfb5..00be378c1 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_collections_configuration.sls
@@ -5,41 +5,31 @@
 
 ### NGINX
 nginx:
-  ### SERVER
-  server:
-    config:
-      ### STREAMS
-      http:
-        upstream collections_downloads_upstream:
-          - server: 'collections.internal:9002 fail_timeout=10s'
-
   servers:
     managed:
       ### DEFAULT
-      arvados_collections_download_default.conf:
+      arvados_collections_default.conf:
         enabled: true
         overwrite: true
         config:
           - server:
-            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+            - server_name: '~^(.*\.)?collections\.__CLUSTER__\.__DOMAIN__'
             - listen:
               - 80
-            - location /.well-known:
-              - root: /var/www
             - location /:
               - return: '301 https://$host$request_uri'
 
-      ### COLLECTIONS / DOWNLOAD
-      arvados_collections_download_ssl.conf:
+      ### COLLECTIONS
+      arvados_collections_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          __CERT_REQUIRES__
         config:
           - server:
-            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+            - server_name: '~^(.*\.)?collections\.__CLUSTER__\.__DOMAIN__'
             - listen:
-              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+              - __KEEPWEB_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://collections_downloads_upstream'
@@ -55,6 +45,7 @@ nginx:
             - proxy_http_version: '1.1'
             - proxy_request_buffering: 'off'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
             - access_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls
index aa11cca74..41d6e1365 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls
@@ -21,7 +21,7 @@ nginx:
   servers:
     managed:
       ### DEFAULT
-      arvados_controller_default:
+      arvados_controller_default.conf:
         enabled: true
         overwrite: true
         config:
@@ -29,14 +29,16 @@ nginx:
             - server_name: __CLUSTER__.__DOMAIN__
             - listen:
               - 80 default
+            - location /.well-known:
+              - root: /var/www
             - location /:
               - return: '301 https://$host$request_uri'
 
-      arvados_controller_ssl:
+      arvados_controller_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          cmd: create-initial-cert-__CLUSTER__.__DOMAIN__-__CLUSTER__.__DOMAIN__
+          __CERT_REQUIRES__
         config:
           - server:
             - server_name: __CLUSTER__.__DOMAIN__
@@ -54,7 +56,8 @@ nginx:
               - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
               - proxy_set_header: 'X-External-Client $external_client'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
             - access_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.error.log
             - client_max_body_size: 128m
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_download_configuration.sls
similarity index 57%
copy from tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
copy to tools/salt-install/config_examples/multi_host/aws/pillars/nginx_download_configuration.sls
index 9ea16bfb5..9246fc11c 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_download_configuration.sls
@@ -5,41 +5,31 @@
 
 ### NGINX
 nginx:
-  ### SERVER
-  server:
-    config:
-      ### STREAMS
-      http:
-        upstream collections_downloads_upstream:
-          - server: 'collections.internal:9002 fail_timeout=10s'
-
   servers:
     managed:
       ### DEFAULT
-      arvados_collections_download_default.conf:
+      arvados_download_default.conf:
         enabled: true
         overwrite: true
         config:
           - server:
-            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+            - server_name: download.__CLUSTER__.__DOMAIN__
             - listen:
               - 80
-            - location /.well-known:
-              - root: /var/www
             - location /:
               - return: '301 https://$host$request_uri'
 
-      ### COLLECTIONS / DOWNLOAD
-      arvados_collections_download_ssl.conf:
+      ### DOWNLOAD
+      arvados_download_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          __CERT_REQUIRES__
         config:
           - server:
-            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+            - server_name: download.__CLUSTER__.__DOMAIN__
             - listen:
-              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+              - __KEEPWEB_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://collections_downloads_upstream'
@@ -55,6 +45,7 @@ nginx:
             - proxy_http_version: '1.1'
             - proxy_request_buffering: 'off'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
-            - access_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.access.log combined
-            - error_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.error.log
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
+            - access_log: /var/log/nginx/download.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/download.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls
index fac97f3c6..2f00524f9 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls
@@ -16,7 +16,7 @@ nginx:
   servers:
     managed:
       ### DEFAULT
-      arvados_keepproxy_default:
+      arvados_keepproxy_default.conf:
         enabled: true
         overwrite: true
         config:
@@ -27,16 +27,16 @@ nginx:
             - location /:
               - return: '301 https://$host$request_uri'
 
-      arvados_keepproxy_ssl:
+      arvados_keepproxy_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          cmd: create-initial-cert-keep.__CLUSTER__.__DOMAIN__-keep.__CLUSTER__.__DOMAIN__
+          __CERT_REQUIRES__
         config:
           - server:
             - server_name: keep.__CLUSTER__.__DOMAIN__
             - listen:
-              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+              - __KEEP_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /:
               - proxy_pass: 'http://keepproxy_upstream'
@@ -53,6 +53,7 @@ nginx:
             - proxy_http_version: '1.1'
             - proxy_request_buffering: 'off'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/keep.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
             - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls
index e99295353..441140e80 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls
@@ -3,6 +3,7 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
+# Keepweb upstream is common to both downloads and collections
 ### NGINX
 nginx:
   ### SERVER
@@ -12,77 +13,3 @@ nginx:
       http:
         upstream collections_downloads_upstream:
           - server: 'localhost:9002 fail_timeout=10s'
-
-  servers:
-    managed:
-      ### DEFAULT
-      arvados_collections_download_default:
-        enabled: true
-        overwrite: true
-        config:
-          - server:
-            - server_name: '~^((.*\.)?collections|download)\.__CLUSTER__\.__DOMAIN__'
-            - listen:
-              - 80
-            - location /:
-              - return: '301 https://$host$request_uri'
-
-      ### COLLECTIONS
-      arvados_collections_ssl:
-        enabled: true
-        overwrite: true
-        requires:
-          cmd: 'create-initial-cert-collections.__CLUSTER__.__DOMAIN__-collections.__CLUSTER__.__DOMAIN__+*.__CLUSTER__.__DOMAIN__'
-        config:
-          - server:
-            - server_name: '*.collections.__CLUSTER__.__DOMAIN__'
-            - listen:
-              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
-            - index: index.html index.htm
-            - location /:
-              - proxy_pass: 'http://collections_downloads_upstream'
-              - proxy_read_timeout: 90
-              - proxy_connect_timeout: 90
-              - proxy_redirect: 'off'
-              - proxy_set_header: X-Forwarded-Proto https
-              - proxy_set_header: 'Host $http_host'
-              - proxy_set_header: 'X-Real-IP $remote_addr'
-              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
-              - proxy_buffering: 'off'
-            - client_max_body_size: 0
-            - proxy_http_version: '1.1'
-            - proxy_request_buffering: 'off'
-            - include: snippets/ssl_hardening_default.conf
-            - include: snippets/collections.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
-            - access_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.access.log combined
-            - error_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.error.log
-
-      ### DOWNLOAD
-      arvados_download_ssl:
-        enabled: true
-        overwrite: true
-        requires:
-          cmd: create-initial-cert-download.__CLUSTER__.__DOMAIN__-download.__CLUSTER__.__DOMAIN__
-        config:
-          - server:
-            - server_name: download.__CLUSTER__.__DOMAIN__
-            - listen:
-              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
-            - index: index.html index.htm
-            - location /:
-              - proxy_pass: 'http://collections_downloads_upstream'
-              - proxy_read_timeout: 90
-              - proxy_connect_timeout: 90
-              - proxy_redirect: 'off'
-              - proxy_set_header: X-Forwarded-Proto https
-              - proxy_set_header: 'Host $http_host'
-              - proxy_set_header: 'X-Real-IP $remote_addr'
-              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
-              - proxy_buffering: 'off'
-            - client_max_body_size: 0
-            - proxy_http_version: '1.1'
-            - proxy_request_buffering: 'off'
-            - include: snippets/ssl_hardening_default.conf
-            - include: snippets/download.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
-            - access_log: /var/log/nginx/download.__CLUSTER__.__DOMAIN__.access.log combined
-            - error_log: /var/log/nginx/download.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls
index 49c86dd31..f2c88c83c 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls
@@ -17,7 +17,7 @@ nginx:
   ### SITES
   servers:
     managed:
-      arvados_webshell_default:
+      arvados_webshell_default.conf:
         enabled: true
         overwrite: true
         config:
@@ -28,16 +28,16 @@ nginx:
             - location /:
               - return: '301 https://$host$request_uri'
 
-      arvados_webshell_ssl:
+      arvados_webshell_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          cmd: create-initial-cert-webshell.__CLUSTER__.__DOMAIN__-webshell.__CLUSTER__.__DOMAIN__
+          __CERT_REQUIRES__
         config:
           - server:
             - server_name: webshell.__CLUSTER__.__DOMAIN__
             - listen:
-              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
+              - __WEBSHELL_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
             - location /shell.__CLUSTER__.__DOMAIN__:
               - proxy_pass: 'http://webshell_upstream'
@@ -69,7 +69,8 @@ nginx:
                 - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
 
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/webshell.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
             - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log
 
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls
index c9671cd0c..9658c620c 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls
@@ -16,7 +16,7 @@ nginx:
   servers:
     managed:
       ### DEFAULT
-      arvados_websocket_default:
+      arvados_websocket_default.conf:
         enabled: true
         overwrite: true
         config:
@@ -27,11 +27,11 @@ nginx:
             - location /:
               - return: '301 https://$host$request_uri'
 
-      arvados_websocket_ssl:
+      arvados_websocket_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          cmd: create-initial-cert-ws.__CLUSTER__.__DOMAIN__-ws.__CLUSTER__.__DOMAIN__
+          __CERT_REQUIRES__
         config:
           - server:
             - server_name: ws.__CLUSTER__.__DOMAIN__
@@ -54,6 +54,7 @@ nginx:
             - proxy_http_version: '1.1'
             - proxy_request_buffering: 'off'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/ws.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
             - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls
index bd4123539..a821b521f 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls
@@ -14,7 +14,7 @@ nginx:
   servers:
     managed:
       ### DEFAULT
-      arvados_workbench2_default:
+      arvados_workbench2_default.conf:
         enabled: true
         overwrite: true
         config:
@@ -25,11 +25,11 @@ nginx:
             - location /:
               - return: '301 https://$host$request_uri'
 
-      arvados_workbench2_ssl:
+      arvados_workbench2_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          cmd: create-initial-cert-workbench2.__CLUSTER__.__DOMAIN__-workbench2.__CLUSTER__.__DOMAIN__
+          __CERT_REQUIRES__
         config:
           - server:
             - server_name: workbench2.__CLUSTER__.__DOMAIN__
@@ -44,6 +44,7 @@ nginx:
             - location /config.json:
               - return: {{ "200 '" ~ '{"API_HOST":"__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__"}' ~ "'" }}
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/workbench2.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
             - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls
index ec28b98c6..32904a12b 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls
@@ -23,7 +23,7 @@ nginx:
   servers:
     managed:
       ### DEFAULT
-      arvados_workbench_default:
+      arvados_workbench_default.conf:
         enabled: true
         overwrite: true
         config:
@@ -34,11 +34,11 @@ nginx:
             - location /:
               - return: '301 https://$host$request_uri'
 
-      arvados_workbench_ssl:
+      arvados_workbench_ssl.conf:
         enabled: true
         overwrite: true
         requires:
-          cmd: create-initial-cert-workbench.__CLUSTER__.__DOMAIN__-workbench.__CLUSTER__.__DOMAIN__
+          __CERT_REQUIRES__
         config:
           - server:
             - server_name: workbench.__CLUSTER__.__DOMAIN__
@@ -55,7 +55,8 @@ nginx:
               - proxy_set_header: 'X-Real-IP $remote_addr'
               - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/workbench.__CLUSTER__.__DOMAIN___letsencrypt_cert[.]conf
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
             - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.error.log
 
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
index ccf6bac78..81d324fcb 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls
@@ -83,8 +83,8 @@ arvados:
     tls:
       # certificate: ''
       # key: ''
-      # required to test with arvados-snakeoil certs
-      insecure: true
+      # When using arvados-snakeoil certs set insecure: true
+      insecure: false
 
     resources:
       virtual_machines:
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_controller_configuration.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_controller_configuration.sls
index 195e9af82..22838fe14 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_controller_configuration.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_controller_configuration.sls
@@ -37,7 +37,7 @@ nginx:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          __CERT_REQUIRES__
         config:
           - server:
             - server_name: __CLUSTER__.__DOMAIN__
@@ -55,7 +55,8 @@ nginx:
               - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
               - proxy_set_header: 'X-External-Client $external_client'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
-            - access_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.access.log combined
-            - error_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.error.log
+            - ssl_certificate: __CERT_PEM__
+            - ssl_certificate_key: __CERT_KEY__
+            - access_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.error.log
             - client_max_body_size: 128m
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepproxy_configuration.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepproxy_configuration.sls
index 91179d4a8..89412e424 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepproxy_configuration.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepproxy_configuration.sls
@@ -33,7 +33,7 @@ nginx:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          file: extra_custom_certs_file_copy_arvados-keepproxy.pem
         config:
           - server:
             - server_name: keep.__CLUSTER__.__DOMAIN__
@@ -55,6 +55,7 @@ nginx:
             - proxy_http_version: '1.1'
             - proxy_request_buffering: 'off'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
+            - ssl_certificate: /etc/nginx/ssl/arvados-keepproxy.pem
+            - ssl_certificate_key: /etc/nginx/ssl/arvados-keepproxy.key
             - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
index 9ea16bfb5..5859d4cfa 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls
@@ -30,14 +30,19 @@ nginx:
               - return: '301 https://$host$request_uri'
 
       ### COLLECTIONS / DOWNLOAD
-      arvados_collections_download_ssl.conf:
+      {%- for vh in [
+        'collections',
+        'download'
+        ]
+      %}
+      arvados_{{ vh }}.conf:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          file: extra_custom_certs_file_copy_arvados-{{ vh }}.pem
         config:
           - server:
-            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__
+            - server_name: {{ vh }}.__CLUSTER__.__DOMAIN__
             - listen:
               - __CONTROLLER_EXT_SSL_PORT__ http2 ssl
             - index: index.html index.htm
@@ -55,6 +60,8 @@ nginx:
             - proxy_http_version: '1.1'
             - proxy_request_buffering: 'off'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
-            - access_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.access.log combined
-            - error_log: /var/log/nginx/collections.__CLUSTER__.__DOMAIN__.error.log
+            - ssl_certificate: /etc/nginx/ssl/arvados-{{ vh }}.pem
+            - ssl_certificate_key: /etc/nginx/ssl/arvados-{{ vh }}.key
+            - access_log: /var/log/nginx/{{ vh }}.__CLUSTER__.__DOMAIN__.access.log combined
+            - error_log: /var/log/nginx/{{ vh }}.__CLUSTER__.__DOMAIN__.error.log
+      {%- endfor %}
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
index a4d3c34f2..4ad14d33f 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_passenger.sls
@@ -62,10 +62,6 @@ nginx:
       # replace with the IP address of your resolver
       # - resolver: 127.0.0.1
 
-    arvados-snakeoil.conf:
-      - ssl_certificate: /etc/ssl/private/arvados-snakeoil-cert.pem
-      - ssl_certificate_key: /etc/ssl/private/arvados-snakeoil-cert.key
-
   ### SITES
   servers:
     managed:
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_webshell_configuration.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_webshell_configuration.sls
index 9b73ab4a0..1afc7ab80 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_webshell_configuration.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_webshell_configuration.sls
@@ -55,7 +55,7 @@ nginx:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          file: extra_custom_certs_file_copy_arvados-webshell.pem
         config:
           - server:
             - server_name: webshell.__CLUSTER__.__DOMAIN__
@@ -94,7 +94,8 @@ nginx:
                 - add_header: "'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'"
             {%- endfor %}
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
+            - ssl_certificate: /etc/nginx/ssl/arvados-webshell.pem
+            - ssl_certificate_key: /etc/nginx/ssl/arvados-webshell.key
             - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log
 
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_websocket_configuration.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_websocket_configuration.sls
index bcd0457c9..2a1f24183 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_websocket_configuration.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_websocket_configuration.sls
@@ -33,7 +33,7 @@ nginx:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          file: extra_custom_certs_file_copy_arvados-websocket.pem
         config:
           - server:
             - server_name: ws.__CLUSTER__.__DOMAIN__
@@ -56,6 +56,7 @@ nginx:
             - proxy_http_version: '1.1'
             - proxy_request_buffering: 'off'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
+            - ssl_certificate: /etc/nginx/ssl/arvados-websocket.pem
+            - ssl_certificate_key: /etc/nginx/ssl/arvados-websocket.key
             - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench2_configuration.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench2_configuration.sls
index 44bd16fe3..50c960cbc 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench2_configuration.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench2_configuration.sls
@@ -37,7 +37,7 @@ nginx:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          file: extra_custom_certs_file_copy_arvados-workbench2.pem
         config:
           - server:
             - server_name: workbench2.__CLUSTER__.__DOMAIN__
@@ -52,6 +52,7 @@ nginx:
             - location /config.json:
               - return: {{ "200 '" ~ '{"API_HOST":"__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__"}' ~ "'" }}
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
+            - ssl_certificate: /etc/nginx/ssl/arvados-workbench2.pem
+            - ssl_certificate_key: /etc/nginx/ssl/arvados-workbench2.key
             - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench_configuration.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench_configuration.sls
index 6b7ab969f..90248fcb2 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench_configuration.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench_configuration.sls
@@ -46,7 +46,7 @@ nginx:
         enabled: true
         overwrite: true
         requires:
-          file: nginx_snippet_arvados-snakeoil.conf
+          file: extra_custom_certs_file_copy_arvados-workbench.pem
         config:
           - server:
             - server_name: workbench.__CLUSTER__.__DOMAIN__
@@ -63,7 +63,8 @@ nginx:
               - proxy_set_header: 'X-Real-IP $remote_addr'
               - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'
             - include: snippets/ssl_hardening_default.conf
-            - include: snippets/arvados-snakeoil.conf
+            - ssl_certificate: /etc/nginx/ssl/arvados-workbench.pem
+            - ssl_certificate_key: /etc/nginx/ssl/arvados-workbench.key
             - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.access.log combined
             - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.error.log
 
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/states/custom_certs.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/custom_certs.sls
new file mode 100644
index 000000000..371650339
--- /dev/null
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/custom_certs.sls
@@ -0,0 +1,31 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{%- set orig_cert_dir = salt['pillar.get']('extra_custom_certs_dir', '/srv/salt/certs')  %}
+{%- set dest_cert_dir = '/etc/nginx/ssl' %}
+{%- set certs = salt['pillar.get']('extra_custom_certs', [])  %}
+
+extra_custom_certs_file_directory_certs_dir:
+  file.directory:
+    - name: /etc/nginx/ssl
+    - require:
+      - pkg: nginx_install
+
+{%- for cert in certs %}
+  {%- set cert_file = 'arvados-' ~ cert ~ '.pem' %}
+  {#- set csr_file = 'arvados-' ~ cert ~ '.csr' #}
+  {%- set key_file = 'arvados-' ~ cert ~ '.key' %}
+  {% for c in [cert_file, key_file] %}
+extra_custom_certs_file_copy_{{ c }}:
+  file.copy:
+    - name: {{ dest_cert_dir }}/{{ c }}
+    - source: {{ orig_cert_dir }}/{{ c }}
+    - force: true
+    - user: root
+    - group: root
+    - unless: cmp {{ dest_cert_dir }}/{{ c }} {{ orig_cert_dir }}/{{ c }}
+    - require:
+      - file: extra_custom_certs_file_directory_certs_dir
+  {%- endfor %}
+{%- endfor %}
diff --git a/tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls
index 4aa9bb62e..8f2fda45b 100644
--- a/tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls
+++ b/tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls
@@ -2,11 +2,16 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+# WARNING: This file is only used for testing purposes, and should not be used
+# in a production environment
+
 {%- set curr_tpldir = tpldir %}
 {%- set tpldir = 'arvados' %}
 {%- from "arvados/map.jinja" import arvados with context %}
 {%- set tpldir = curr_tpldir %}
 
+{%- set orig_cert_dir = salt['pillar.get']('extra_custom_certs_dir', '/srv/salt/certs')  %}
+
 include:
   - nginx.passenger
   - nginx.config
@@ -16,21 +21,26 @@ include:
 # we'll keep it simple here.
 {%- set arvados_ca_cert_file = '/etc/ssl/private/arvados-snakeoil-ca.pem' %}
 {%- set arvados_ca_key_file = '/etc/ssl/private/arvados-snakeoil-ca.key' %}
-{%- set arvados_cert_file = '/etc/ssl/private/arvados-snakeoil-cert.pem' %}
-{%- set arvados_csr_file = '/etc/ssl/private/arvados-snakeoil-cert.csr' %}
-{%- set arvados_key_file = '/etc/ssl/private/arvados-snakeoil-cert.key' %}
 
 {%- if grains.get('os_family') == 'Debian' %}
   {%- set arvados_ca_cert_dest = '/usr/local/share/ca-certificates/arvados-snakeoil-ca.crt' %}
   {%- set update_ca_cert = '/usr/sbin/update-ca-certificates' %}
   {%- set openssl_conf = '/etc/ssl/openssl.cnf' %}
+
+extra_snakeoil_certs_ssl_cert_pkg_installed:
+  pkg.installed:
+    - name: ssl-cert
+    - require_in:
+      - sls: postgres
+
 {%- else %}
   {%- set arvados_ca_cert_dest = '/etc/pki/ca-trust/source/anchors/arvados-snakeoil-ca.pem' %}
   {%- set update_ca_cert = '/usr/bin/update-ca-trust' %}
   {%- set openssl_conf = '/etc/pki/tls/openssl.cnf' %}
+
 {%- endif %}
 
-arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed:
+extra_snakeoil_certs_dependencies_pkg_installed:
   pkg.installed:
     - pkgs:
       - openssl
@@ -41,15 +51,15 @@ arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_in
 # random generator, cf
 #   https://github.com/openssl/openssl/issues/7754
 #
-arvados_test_salt_states_examples_single_host_snakeoil_certs_file_comment_etc_openssl_conf:
+extra_snakeoil_certs_file_comment_etc_openssl_conf:
   file.comment:
     - name: /etc/ssl/openssl.cnf
     - regex: ^RANDFILE.*
     - onlyif: grep -q ^RANDFILE /etc/ssl/openssl.cnf
     - require_in:
-      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run
+      - cmd: extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run
 
-arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run:
+extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run:
   # Taken from https://github.com/arvados/arvados/blob/master/tools/arvbox/lib/arvbox/docker/service/certificate/run
   cmd.run:
     - name: |
@@ -74,61 +84,82 @@ arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_c
       - test -f {{ arvados_ca_cert_file }}
       - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_ca_cert_file }}
     - require:
-      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed
+      - pkg: extra_snakeoil_certs_dependencies_pkg_installed
+
+# Create independent certs for each vhost
+{%- for vh in [
+  'collections',
+  'controller',
+  'download',
+  'keepproxy',
+  'webshell',
+  'workbench',
+  'workbench2',
+  'websocket',
+  ]
+%}
+# We're creating these in a tmp directory so they can be copied to their destination
+# by the `custom_certs` state file, as if they were custom certificates.
+{%- set arvados_cert_file = orig_cert_dir ~ '/arvados-' ~ vh ~ '.pem' %}
+{%- set arvados_csr_file = orig_cert_dir ~ '/arvados-' ~ vh ~ '.csr' %}
+{%- set arvados_key_file = orig_cert_dir ~ '/arvados-' ~ vh ~ '.key' %}
 
-arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_cert_cmd_run:
+extra_snakeoil_certs_arvados_snakeoil_cert_{{ vh }}_cmd_run:
   cmd.run:
     - name: |
-        cat > /tmp/openssl.cnf <<-CNF
+        cat > /tmp/{{ vh }}.openssl.cnf <<-CNF
         [req]
         default_bits = 2048
         prompt = no
         default_md = sha256
-        req_extensions = rext
         distinguished_name = dn
+        req_extensions = rext
+        [rext]
+        subjectAltName = @alt_names
         [dn]
         C   = CC
         ST  = Some State
         L   = Some Location
-        O   = Arvados Formula
-        OU  = arvados-formula
-        CN  = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        O   = Arvados Provision Example Single Host / Multiple Hostnames
+        OU  = arvados-provision-example-single_host_multiple_hostnames
+        CN  = {{ vh }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
         emailAddress = admin@{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
-        [rext]
-        subjectAltName = @alt_names
         [alt_names]
         {%- for entry in grains.get('ipv4') %}
         IP.{{ loop.index }} = {{ entry }}
         {%- endfor %}
-        {%- for entry in [
-            'keep',
-            'collections',
-            'download',
-            'ws',
-            'workbench',
-            'workbench2',
+        DNS.1 = {{ vh }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+        {%- if vh in [
+          'controller',
+          'keepproxy',
+          'websocket'
           ]
         %}
-        DNS.{{ loop.index }} = {{ entry }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
-        {%- endfor %}
-        DNS.7 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+          {%- if vh == 'controller' %}
+        DNS.2 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+          {%- elif vh == 'keepproxy' %}
+        DNS.2 = keep.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+          {%- elif vh == 'websocket' %}
+        DNS.2 = ws.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
+          {%- endif %}
+        {%- endif %}
         CNF
 
         # The req
         openssl req \
-          -config /tmp/openssl.cnf \
+          -config /tmp/{{ vh }}.openssl.cnf \
           -new \
           -nodes \
           -sha256 \
           -out {{ arvados_csr_file }} \
-          -keyout {{ arvados_key_file }} > /tmp/snake_oil_certs.output 2>&1 && \
+          -keyout {{ arvados_key_file }} > /tmp/snakeoil_certs.{{ vh }}.output 2>&1 && \
         # The cert
         openssl x509 \
           -req \
           -days 365 \
           -in {{ arvados_csr_file }} \
           -out {{ arvados_cert_file }} \
-          -extfile /tmp/openssl.cnf \
+          -extfile /tmp/{{ vh }}.openssl.cnf \
           -extensions rext \
           -CA {{ arvados_ca_cert_file }} \
           -CAkey {{ arvados_ca_key_file }} \
@@ -139,27 +170,20 @@ arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_c
       - test -f {{ arvados_key_file }}
       - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_cert_file }}
     - require:
-      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_dependencies_pkg_installed
-      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_ca_cmd_run
-    # We need this before we can add the nginx's snippet
-    - require_in:
-      - file: nginx_snippet_arvados-snakeoil.conf
-
-{%- if grains.get('os_family') == 'Debian' %}
-arvados_test_salt_states_examples_single_host_snakeoil_certs_ssl_cert_pkg_installed:
-  pkg.installed:
-    - name: ssl-cert
+      - pkg: extra_snakeoil_certs_dependencies_pkg_installed
+      - cmd: extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run
     - require_in:
-      - sls: postgres
+      - file: extra_custom_certs_file_copy_arvados-{{ vh }}.pem
+      - file: extra_custom_certs_file_copy_arvados-{{ vh }}.key
 
-arvados_test_salt_states_examples_single_host_snakeoil_certs_certs_permissions_cmd_run:
+  {%- if grains.get('os_family') == 'Debian' %}
+extra_snakeoil_certs_certs_permissions_{{ vh }}_cmd_run:
   file.managed:
     - name: {{ arvados_key_file }}
     - owner: root
     - group: ssl-cert
     - require:
-      - cmd: arvados_test_salt_states_examples_single_host_snakeoil_certs_arvados_snake_oil_cert_cmd_run
-      - pkg: arvados_test_salt_states_examples_single_host_snakeoil_certs_ssl_cert_pkg_installed
-    - require_in:
-      - file: nginx_snippet_arvados-snakeoil.conf
-{%- endif %}
+      - cmd: extra_snakeoil_certs_arvados_snakeoil_cert_{{ vh }}_cmd_run
+      - pkg: extra_snakeoil_certs_ssl_cert_pkg_installed
+  {%- endif %}
+{%- endfor %}
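
A quick manual check of the snakeoil certificates generated by the loop above. This is a sketch assuming the default extra_custom_certs_dir of /srv/salt/certs and the 'controller' vhost:

    # Verify the per-vhost cert against the snakeoil CA created earlier in this state
    openssl verify -CAfile /etc/ssl/private/arvados-snakeoil-ca.pem \
      /srv/salt/certs/arvados-controller.pem
    # The SAN list should carry controller.<cluster>.<domain> plus the bare cluster domain
    openssl x509 -noout -text -in /srv/salt/certs/arvados-controller.pem \
      | grep -A1 'Subject Alternative Name'
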
diff --git a/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls b/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
index a45ac8d81..78a5a938f 100644
--- a/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls
@@ -72,7 +72,7 @@ arvados:
     tls:
       # certificate: ''
       # key: ''
-      # required to test with arvados-snakeoil certs
+      # When using arvados-snakeoil certs, set insecure: true
       insecure: true
 
     ### TOKENS
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 283c631ec..c770c8d74 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -43,7 +43,6 @@ DATABASE_INT_IP=10.0.0.6
 SHELL_INT_IP=10.0.0.7
 
 INITIAL_USER="admin"
-INITIAL_USER_PASSWORD="password"
 
 # If not specified, the initial user email will be composed as
 # INITIAL_USER at CLUSTER.DOMAIN
@@ -64,7 +63,7 @@ DATABASE_PASSWORD=please_set_this_to_some_secure_value
 # salt formula (https://github.com/saltstack-formulas/letsencrypt-formula) to try to
 # automatically obtain and install SSL certificates for your instances or set this
 # variable to "no", provide and upload your own certificates to the instances and
-# modify the 'nginx_*' salt pillars accordingly
+# modify the 'nginx_*' salt pillars accordingly (see CUSTOM_CERTS_DIR below)
 USE_LETSENCRYPT="yes"
 USE_LETSENCRYPT_IAM_USER="yes"
 # For collections, we need to obtain a wildcard certificate for
@@ -76,6 +75,25 @@ LE_AWS_REGION="us-east-1"
 LE_AWS_ACCESS_KEY_ID="AKIABCDEFGHIJKLMNOPQ"
 LE_AWS_SECRET_ACCESS_KEY="thisistherandomstringthatisyoursecretkey"
 
+# If you are going to provide your own certificates for Arvados, the provision script can
+# help you deploy them. In order to do that, you need to set `USE_LETSENCRYPT=no` above,
+# and copy the required certificates under the directory specified in the next line.
+# The certs will be copied from this directory by the provision script.
+CUSTOM_CERTS_DIR="./certs"
+# The script expects cert/key files with these basenames (matching the role except for
+# keepweb, which is split into download/collections):
+#  "controller"
+#  "websocket"
+#  "workbench"
+#  "workbench2"
+#  "webshell"
+#  "download"         # Part of keepweb
+#  "collections"      # Part of keepweb
+#  "keep"             # Keepproxy
+# E.g., for 'keep', the script will look for
+# ${CUSTOM_CERTS_DIR}/keep.crt
+# ${CUSTOM_CERTS_DIR}/keep.key
+
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
 # CONFIG_DIR="local_config_dir"
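
To make the CUSTOM_CERTS_DIR comments above concrete, a hedged sketch of preparing the directory before running the provision script (paths and filenames are illustrative; the basenames follow the list above):

    # With USE_LETSENCRYPT="no", drop one .crt/.key pair per role into CUSTOM_CERTS_DIR
    mkdir -p ./certs
    cp /path/to/my/controller.crt /path/to/my/controller.key ./certs/
    cp /path/to/my/keep.crt       /path/to/my/keep.key       ./certs/
    # ...repeat for websocket, workbench, workbench2, webshell, download and collections,
    # then run the provision script with this local.params
    ./provision.sh --config local.params
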
diff --git a/tools/salt-install/local.params.example.single_host_multiple_hostnames b/tools/salt-install/local.params.example.single_host_multiple_hostnames
index e23634e8c..cf79fe244 100644
--- a/tools/salt-install/local.params.example.single_host_multiple_hostnames
+++ b/tools/salt-install/local.params.example.single_host_multiple_hostnames
@@ -45,9 +45,28 @@ DATABASE_PASSWORD=please_set_this_to_some_secure_value
 # salt formula (https://github.com/saltstack-formulas/letsencrypt-formula) to try to
 # automatically obtain and install SSL certificates for your instances or set this
 # variable to "no", provide and upload your own certificates to the instances and
-# modify the 'nginx_*' salt pillars accordingly
+# modify the 'nginx_*' salt pillars accordingly (see CUSTOM_CERTS_DIR below)
 USE_LETSENCRYPT="no"
 
+# If you are going to provide your own certificates for Arvados, the provision script can
+# help you deploy them. In order to do that, you need to set `USE_LETSENCRYPT=no` above,
+# and copy the required certificates under the directory specified in the next line.
+# The certs will be copied from this directory by the provision script.
+CUSTOM_CERTS_DIR="./certs"
+# The script expects cert/key files with these basenames (matching the role except for
+# keepweb, which is split into download/collections):
+#  "controller"
+#  "websocket"
+#  "workbench"
+#  "workbench2"
+#  "webshell"
+#  "download"         # Part of keepweb
+#  "collections"      # Part of keepweb
+#  "keepproxy"
+# E.g., for 'keepproxy', the script will look for
+# ${CUSTOM_CERTS_DIR}/keepproxy.crt
+# ${CUSTOM_CERTS_DIR}/keepproxy.key
+
 # The directory to check for the config files (pillars, states) you want to use.
 # There are a few examples under 'config_examples'.
 # CONFIG_DIR="local_config_dir"
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index cb75610a2..6a5cc9ee5 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -49,6 +49,7 @@ usage() {
   echo >&2 "                                                  for the selected role/s"
   echo >&2 "                                                - writes the resulting files into <dest_dir>"
   echo >&2 "  -v, --vagrant                               Run in vagrant and use the /vagrant shared dir"
+  echo >&2 "  --development                               Run in dev mode, using snakeoil certs"
   echo >&2
 }
 
@@ -60,7 +61,7 @@ arguments() {
   fi
 
   TEMP=$(getopt -o c:dhp:r:tv \
-    --long config:,debug,dump-config:,help,roles:,test,vagrant \
+    --long config:,debug,development,dump-config:,help,roles:,test,vagrant \
     -n "${0}" -- "${@}")
 
   if [ ${?} != 0 ];
@@ -98,6 +99,10 @@ arguments() {
         DUMP_CONFIG="yes"
         shift 2
         ;;
+      --development)
+        DEV_MODE="yes"
+        shift 1
+        ;;
       -r | --roles)
         for i in ${2//,/ }
           do
@@ -131,6 +136,7 @@ arguments() {
   done
 }
 
+DEV_MODE="no"
 CONFIG_FILE="${SCRIPT_DIR}/local.params"
 CONFIG_DIR="local_config_dir"
 DUMP_CONFIG="no"
@@ -159,6 +165,9 @@ WEBSOCKET_EXT_SSL_PORT=8002
 WORKBENCH1_EXT_SSL_PORT=443
 WORKBENCH2_EXT_SSL_PORT=3001
 
+USE_LETSENCRYPT="no"
+CUSTOM_CERTS_DIR="./certs"
+
 ## These are ARVADOS-related parameters
 # For a stable release, change RELEASE "production" and VERSION to the
 # package version (including the iteration, e.g. X.Y.Z-1) of the
@@ -449,9 +458,20 @@ EOFPSLS
 
 # States, extra states
 if [ -d "${F_DIR}"/extra/extra ]; then
-  for f in $(ls "${F_DIR}"/extra/extra/*.sls); do
+  if [ "$DEV_MODE" = "yes" ]; then
+    # In dev mode, we create some snake oil certs that we'll
+    # use as CUSTOM_CERTS, so we don't skip the states file
+    SKIP_SNAKE_OIL="dont_snakeoil_certs"
+  else
+    SKIP_SNAKE_OIL="snakeoil_certs"
+  fi
+  for f in $(ls "${F_DIR}"/extra/extra/*.sls | grep -v ${SKIP_SNAKE_OIL}); do
   echo "    - extra.$(basename ${f} | sed 's/.sls$//g')" >> ${S_DIR}/top.sls
   done
+  # Use custom certs
+  if [ "x${USE_LETSENCRYPT}" != "xyes" ]; then
+    mkdir -p "${F_DIR}"/extra/extra/files
+  fi
 fi
 
 # If we want specific roles for a node, just add the desired states
@@ -461,11 +481,21 @@ if [ -z "${ROLES}" ]; then
   echo "    - nginx.passenger" >> ${S_DIR}/top.sls
   # Currently, only available on config_examples/multi_host/aws
   if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
-    if [ "x${USE_LETSENCRYPT_IAM_USER}" = "xyes" ]; then
-      grep -q "aws_credentials" ${S_DIR}/top.sls || echo "    - aws_credentials" >> ${S_DIR}/top.sls
+    if [ "x${USE_LETSENCRYPT_IAM_USER}" != "xyes" ]; then
+      grep -q "aws_credentials" ${S_DIR}/top.sls || echo "    - extra.aws_credentials" >> ${S_DIR}/top.sls
     fi
     grep -q "letsencrypt"     ${S_DIR}/top.sls || echo "    - letsencrypt" >> ${S_DIR}/top.sls
+  else
+    # Use custom certs
+    # Copy certs to formula extra/files
+    # In dev mode, the files will be created and put in the destination directory by the
+    # snakeoil_certs.sls state file
+    mkdir -p /srv/salt/certs
+    cp -rv ${CUSTOM_CERTS_DIR}/* /srv/salt/certs/
+    # We add the custom_certs state
+    grep -q "custom_certs"    ${S_DIR}/top.sls || echo "    - extra.custom_certs" >> ${S_DIR}/top.sls
   fi
+
   echo "    - postgres" >> ${S_DIR}/top.sls
   echo "    - docker.software" >> ${S_DIR}/top.sls
   echo "    - arvados" >> ${S_DIR}/top.sls
@@ -482,12 +512,37 @@ if [ -z "${ROLES}" ]; then
   echo "    - nginx_workbench2_configuration" >> ${P_DIR}/top.sls
   echo "    - nginx_workbench_configuration" >> ${P_DIR}/top.sls
   echo "    - postgresql" >> ${P_DIR}/top.sls
+
   # Currently, only available on config_examples/multi_host/aws
   if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
-    if [ "x${USE_LETSENCRYPT_IAM_USER}" = "xyes" ]; then
+    if [ "x${USE_LETSENCRYPT_IAM_USER}" != "xyes" ]; then
       grep -q "aws_credentials" ${P_DIR}/top.sls || echo "    - aws_credentials" >> ${P_DIR}/top.sls
     fi
     grep -q "letsencrypt"     ${P_DIR}/top.sls || echo "    - letsencrypt" >> ${P_DIR}/top.sls
+
+    # As the pillars differ depending on whether we use LE or custom certs, we need to do a final edit on them
+    for c in controller websocket workbench workbench2 webshell download collections keepproxy; do
+      sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${c}.${CLUSTER}.${DOMAIN}*/g;
+              s#__CERT_PEM__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/fullchain.pem#g;
+              s#__CERT_KEY__#/etc/letsencrypt/live/${c}.${CLUSTER}.${DOMAIN}/privkey.pem#g" \
+      ${P_DIR}/nginx_${c}_configuration.sls
+    done
+  else
+    # Use custom certs (either dev mode or prod)
+    grep -q "extra_custom_certs" ${P_DIR}/top.sls || echo "    - extra_custom_certs" >> ${P_DIR}/top.sls
+    # And add the certs in the custom_certs pillar
+    echo "extra_custom_certs_dir: /srv/salt/certs" > ${P_DIR}/extra_custom_certs.sls
+    echo "extra_custom_certs:" >> ${P_DIR}/extra_custom_certs.sls
+
+    for c in controller websocket workbench workbench2 webshell download collections keepproxy; do
+      grep -q ${c} ${P_DIR}/extra_custom_certs.sls || echo "  - ${c}" >> ${P_DIR}/extra_custom_certs.sls
+
+      # As the pillars differ depending on whether we use LE or custom certs, we need to do a final edit on them
+      sed -i "s/__CERT_REQUIRES__/file: extra_custom_certs_file_copy_arvados-${c}.pem/g;
+              s#__CERT_PEM__#/etc/nginx/ssl/arvados-${c}.pem#g;
+              s#__CERT_KEY__#/etc/nginx/ssl/arvados-${c}.key#g" \
+      ${P_DIR}/nginx_${c}_configuration.sls
+    done
   fi
 else
   # If we add individual roles, make sure we add the repo first
@@ -506,13 +561,18 @@ else
         grep -q "postgres.client" ${S_DIR}/top.sls || echo "    - postgres.client" >> ${S_DIR}/top.sls
         grep -q "nginx.passenger" ${S_DIR}/top.sls || echo "    - nginx.passenger" >> ${S_DIR}/top.sls
         ### If we don't install and run LE before arvados-api-server, it fails and breaks everything
-        ### after it so we add this here, as we are, after all, sharing the host for api and controller
+        ### after it. So we add this here as we are, after all, sharing the host for api and controller
         # Currently, only available on config_examples/multi_host/aws
         if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
-          if [ "x${USE_LETSENCRYPT_IAM_USER}" = "xyes" ]; then
+          if [ "x${USE_LETSENCRYPT_IAM_USER}" != "xyes" ]; then
             grep -q "aws_credentials" ${S_DIR}/top.sls || echo "    - aws_credentials" >> ${S_DIR}/top.sls
           fi
-          grep -q "letsencrypt"     ${S_DIR}/top.sls || echo "    - letsencrypt" >> ${S_DIR}/top.sls
+          grep -q "letsencrypt" ${S_DIR}/top.sls || echo "    - letsencrypt" >> ${S_DIR}/top.sls
+        else
+          # Use custom certs
+          cp -v ${CUSTOM_CERTS_DIR}/controller.* "${F_DIR}/extra/extra/files/"
+          # We add the custom_certs state
+          grep -q "custom_certs"    ${S_DIR}/top.sls || echo "    - extra.custom_certs" >> ${S_DIR}/top.sls
         fi
         grep -q "arvados.${R}" ${S_DIR}/top.sls    || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
         # Pillars
@@ -527,25 +587,76 @@ else
         grep -q "nginx.passenger" ${S_DIR}/top.sls || echo "    - nginx.passenger" >> ${S_DIR}/top.sls
         # Currently, only available on config_examples/multi_host/aws
         if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
-          if [ "x${USE_LETSENCRYPT_IAM_USER}" = "xyes" ]; then
+          if [ "x${USE_LETSENCRYPT_IAM_USER}" != "xyes" ]; then
             grep -q "aws_credentials" ${S_DIR}/top.sls || echo "    - aws_credentials" >> ${S_DIR}/top.sls
           fi
           grep -q "letsencrypt"     ${S_DIR}/top.sls || echo "    - letsencrypt" >> ${S_DIR}/top.sls
+        else
+          # Use custom certs, special case for keepweb
+          if [ ${R} = "keepweb" ]; then
+            cp -v ${CUSTOM_CERTS_DIR}/download.* "${F_DIR}/extra/extra/files/"
+            cp -v ${CUSTOM_CERTS_DIR}/collections.* "${F_DIR}/extra/extra/files/"
+          else
+            cp -v ${CUSTOM_CERTS_DIR}/${R}.* "${F_DIR}/extra/extra/files/"
+          fi
+          # We add the custom_certs state
+          grep -q "custom_certs"    ${S_DIR}/top.sls || echo "    - extra.custom_certs" >> ${S_DIR}/top.sls
+
         fi
         # webshell role is just a nginx vhost, so it has no state
         if [ "${R}" != "webshell" ]; then
-          grep -q "arvados.${R}" ${S_DIR}/top.sls    || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
+          grep -q "arvados.${R}" ${S_DIR}/top.sls || echo "    - arvados.${R}" >> ${S_DIR}/top.sls
         fi
         # Pillars
         grep -q "nginx_passenger" ${P_DIR}/top.sls          || echo "    - nginx_passenger" >> ${P_DIR}/top.sls
         grep -q "nginx_${R}_configuration" ${P_DIR}/top.sls || echo "    - nginx_${R}_configuration" >> ${P_DIR}/top.sls
+        # Special case for keepweb
+        if [ ${R} = "keepweb" ]; then
+          grep -q "nginx_download_configuration" ${P_DIR}/top.sls || echo "    - nginx_download_configuration" >> ${P_DIR}/top.sls
+          grep -q "nginx_collections_configuration" ${P_DIR}/top.sls || echo "    - nginx_collections_configuration" >> ${P_DIR}/top.sls
+        fi
+
         # Currently, only available on config_examples/multi_host/aws
         if [ "x${USE_LETSENCRYPT}" = "xyes" ]; then
-          if [ "x${USE_LETSENCRYPT_IAM_USER}" = "xyes" ]; then
+          if [ "x${USE_LETSENCRYPT_IAM_USER}" != "xyes" ]; then
             grep -q "aws_credentials" ${P_DIR}/top.sls || echo "    - aws_credentials" >> ${P_DIR}/top.sls
           fi
           grep -q "letsencrypt"     ${P_DIR}/top.sls || echo "    - letsencrypt" >> ${P_DIR}/top.sls
           grep -q "letsencrypt_${R}_configuration" ${P_DIR}/top.sls || echo "    - letsencrypt_${R}_configuration" >> ${P_DIR}/top.sls
+
+          # As the pillars differ depending on whether we use LE or custom certs, we need to do a final edit on them
+          # Special case for keepweb
+          if [ ${R} = "keepweb" ]; then
+            for kwsub in download collections; do
+              sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${kwsub}.${CLUSTER}.${DOMAIN}*/g;
+                      s#__CERT_PEM__#/etc/letsencrypt/live/${kwsub}.${CLUSTER}.${DOMAIN}/fullchain.pem#g;
+                      s#__CERT_KEY__#/etc/letsencrypt/live/${kwsub}.${CLUSTER}.${DOMAIN}/privkey.pem#g" \
+              ${P_DIR}/nginx_${kwsub}_configuration.sls
+            done
+          else
+            sed -i "s/__CERT_REQUIRES__/cmd: create-initial-cert-${R}.${CLUSTER}.${DOMAIN}*/g;
+                    s#__CERT_PEM__#/etc/letsencrypt/live/${R}.${CLUSTER}.${DOMAIN}/fullchain.pem#g;
+                    s#__CERT_KEY__#/etc/letsencrypt/live/${R}.${CLUSTER}.${DOMAIN}/privkey.pem#g" \
+            ${P_DIR}/nginx_${R}_configuration.sls
+          fi
+        else
+          grep -q ${R} ${P_DIR}/extra_custom_certs.sls || echo "  - ${R}" >> ${P_DIR}/extra_custom_certs.sls
+
+          # As the pillars differ depending on whether we use LE or custom certs, we need to do a final edit on them
+          # Special case for keepweb
+          if [ ${R} = "keepweb" ]; then
+            for kwsub in download collections; do
+              sed -i "s/__CERT_REQUIRES__/file: extra_custom_certs_file_copy_arvados-${kwsub}.pem/g;
+                      s#__CERT_PEM__#/etc/nginx/ssl/arvados-${kwsub}.pem#g;
+                      s#__CERT_KEY__#/etc/nginx/ssl/arvados-${kwsub}.key#g" \
+              ${P_DIR}/nginx_${kwsub}_configuration.sls
+            done
+          else
+            sed -i "s/__CERT_REQUIRES__/file: extra_custom_certs_file_copy_arvados-${R}.pem/g;
+                    s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;
+                    s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g" \
+            ${P_DIR}/nginx_${R}_configuration.sls
+          fi
         fi
       ;;
       "shell")
@@ -610,15 +721,17 @@ fi
 # END FIXME! #16992 Temporary fix for psql call in arvados-api-server
 
 # Leave a copy of the Arvados CA so the user can copy it where it's required
-echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
-# If running in a vagrant VM, also add default user to docker group
-if [ "x${VAGRANT}" = "xyes" ]; then
-  cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
-
-  echo "Adding the vagrant user to the docker group"
-  usermod -a -G docker vagrant
-else
-  cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
+if [ "$DEV_MODE" = "yes" ]; then
+  echo "Copying the Arvados CA certificate to the installer dir, so you can import it"
+  # If running in a vagrant VM, also add default user to docker group
+  if [ "x${VAGRANT}" = "xyes" ]; then
+    cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
+
+    echo "Adding the vagrant user to the docker group"
+    usermod -a -G docker vagrant
+  else
+    cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${CLUSTER}.${DOMAIN}-arvados-snakeoil-ca.pem
+  fi
 fi
 
 # Test that the installation finished correctly
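
Putting the new --development flag together with the custom-certificate path in provision.sh above, a usage sketch (only --development and --config are taken from this patch; the rest of the command line is illustrative):

    # Development run: snakeoil certs are generated and deployed through the
    # custom_certs machinery, and the snakeoil CA is copied next to the installer
    ./provision.sh --development --config local.params

    # Production run with your own certs: set USE_LETSENCRYPT="no" in local.params
    # and place the certificates under CUSTOM_CERTS_DIR (default ./certs) beforehand
    ./provision.sh --config local.params
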

commit 82691f82adecc3baf60b392b2d295ab2381f85bc
Author: Tom Clegg <tom at curii.com>
Date:   Mon Nov 22 13:45:21 2021 -0500

    Merge branch '18298-lsf-no-suitable-hosts'
    
    refs #18298
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index 39d9b79a7..fbf25fa7a 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -39,6 +39,10 @@ h2(#v2_3_1). v2.3.1 (2021-11-19)
 
 "previous: Upgrading to 2.3.0":#v2_3_0
 
+h3. Default LSF arguments have changed
+
+If you use LSF and your configuration specifies @Containers.LSF.BsubArgumentsList@, you should update it to include the new arguments (@"-R", "select[mem>=%MMB]", ...@, see "configuration reference":{{site.baseurl}}/admin/config.html). Otherwise, containers that are too big to run on any LSF host will remain in the LSF queue instead of being cancelled.
+
 h3. Previously trashed role groups will be deleted
 
 Due to a bug in previous versions, the @DELETE@ operation on a role group caused the group to be flagged as trash in the database, but continue to grant permissions regardless. After upgrading, any role groups that had been trashed this way will be deleted. This might surprise some users if they were relying on permissions that were still in effect due to this bug. Future @DELETE@ operations on a role group will immediately delete the group and revoke the associated permissions.
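
Regarding the LSF arguments note above, a hedged way to spot an override that needs updating (assuming the standard /etc/arvados/config.yml location; the select[] arguments mirror the new defaults in config.default.yml below):

    # A custom Containers.LSF.BsubArgumentsList should now also include, e.g.:
    #   "-R", "select[mem>=%MMB]", "-R", "select[tmp>=%TMB]", "-R", "select[ncpus>=%C]"
    grep -n 'BsubArgumentsList' /etc/arvados/config.yml
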

commit a48908445762d574b41d611021a537c805f7f3ad
Author: Tom Clegg <tom at curii.com>
Date:   Fri Nov 19 17:29:50 2021 -0500

    Merge branch '18298-lsf-no-suitable-hosts'
    
    refs #18298
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index bbdbe6ab9..4e5fa705f 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -1053,7 +1053,7 @@ Clusters:
         # in /tmp on the compute node each time an Arvados container
         # runs. Ensure you have something in place to delete old files
         # from /tmp, or adjust the "-o" and "-e" arguments accordingly.
-        BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]"]
+        BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]", "-R", "select[mem>=%MMB]", "-R", "select[tmp>=%TMB]", "-R", "select[ncpus>=%C]"]
 
         # Use sudo to switch to this user account when submitting LSF
         # jobs.
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index 576eb0c00..b82d94809 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -1059,7 +1059,7 @@ Clusters:
         # in /tmp on the compute node each time an Arvados container
         # runs. Ensure you have something in place to delete old files
         # from /tmp, or adjust the "-o" and "-e" arguments accordingly.
-        BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]"]
+        BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]", "-R", "select[mem>=%MMB]", "-R", "select[tmp>=%TMB]", "-R", "select[ncpus>=%C]"]
 
         # Use sudo to switch to this user account when submitting LSF
         # jobs.
diff --git a/lib/lsf/dispatch.go b/lib/lsf/dispatch.go
index 6e35b7de9..537d52a07 100644
--- a/lib/lsf/dispatch.go
+++ b/lib/lsf/dispatch.go
@@ -167,7 +167,7 @@ func (disp *dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
 
 	if ctr.State != dispatch.Locked {
 		// already started by prior invocation
-	} else if _, ok := disp.lsfqueue.JobID(ctr.UUID); !ok {
+	} else if _, ok := disp.lsfqueue.Lookup(ctr.UUID); !ok {
 		disp.logger.Printf("Submitting container %s to LSF", ctr.UUID)
 		cmd := []string{disp.Cluster.Containers.CrunchRunCommand}
 		cmd = append(cmd, "--runtime-engine="+disp.Cluster.Containers.RuntimeEngine)
@@ -181,16 +181,38 @@ func (disp *dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
 	disp.logger.Printf("Start monitoring container %v in state %q", ctr.UUID, ctr.State)
 	defer disp.logger.Printf("Done monitoring container %s", ctr.UUID)
 
-	// If the container disappears from the lsf queue, there is
-	// no point in waiting for further dispatch updates: just
-	// clean up and return.
 	go func(uuid string) {
+		cancelled := false
 		for ctx.Err() == nil {
-			if _, ok := disp.lsfqueue.JobID(uuid); !ok {
+			qent, ok := disp.lsfqueue.Lookup(uuid)
+			if !ok {
+				// If the container disappears from
+				// the lsf queue, there is no point in
+				// waiting for further dispatch
+				// updates: just clean up and return.
 				disp.logger.Printf("container %s job disappeared from LSF queue", uuid)
 				cancel()
 				return
 			}
+			if !cancelled && qent.Stat == "PEND" && strings.Contains(qent.PendReason, "There are no suitable hosts for the job") {
+				disp.logger.Printf("container %s: %s", uuid, qent.PendReason)
+				err := disp.arvDispatcher.Arv.Update("containers", uuid, arvadosclient.Dict{
+					"container": map[string]interface{}{
+						"runtime_status": map[string]string{
+							"error": qent.PendReason,
+						},
+					},
+				}, nil)
+				if err != nil {
+					disp.logger.Printf("error setting runtime_status on %s: %s", uuid, err)
+					continue // retry
+				}
+				err = disp.arvDispatcher.UpdateState(uuid, dispatch.Cancelled)
+				if err != nil {
+					continue // retry (UpdateState() already logged the error)
+				}
+				cancelled = true
+			}
 		}
 	}(ctr.UUID)
 
@@ -236,10 +258,10 @@ func (disp *dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Contain
 	// from the queue.
 	ticker := time.NewTicker(5 * time.Second)
 	defer ticker.Stop()
-	for jobid, ok := disp.lsfqueue.JobID(ctr.UUID); ok; _, ok = disp.lsfqueue.JobID(ctr.UUID) {
-		err := disp.lsfcli.Bkill(jobid)
+	for qent, ok := disp.lsfqueue.Lookup(ctr.UUID); ok; _, ok = disp.lsfqueue.Lookup(ctr.UUID) {
+		err := disp.lsfcli.Bkill(qent.ID)
 		if err != nil {
-			disp.logger.Warnf("%s: bkill(%d): %s", ctr.UUID, jobid, err)
+			disp.logger.Warnf("%s: bkill(%s): %s", ctr.UUID, qent.ID, err)
 		}
 		<-ticker.C
 	}
@@ -262,10 +284,10 @@ func (disp *dispatcher) submit(container arvados.Container, crunchRunCommand []s
 }
 
 func (disp *dispatcher) bkill(ctr arvados.Container) {
-	if jobid, ok := disp.lsfqueue.JobID(ctr.UUID); !ok {
+	if qent, ok := disp.lsfqueue.Lookup(ctr.UUID); !ok {
 		disp.logger.Debugf("bkill(%s): redundant, job not in queue", ctr.UUID)
-	} else if err := disp.lsfcli.Bkill(jobid); err != nil {
-		disp.logger.Warnf("%s: bkill(%d): %s", ctr.UUID, jobid, err)
+	} else if err := disp.lsfcli.Bkill(qent.ID); err != nil {
+		disp.logger.Warnf("%s: bkill(%s): %s", ctr.UUID, qent.ID, err)
 	}
 }
 
diff --git a/lib/lsf/dispatch_test.go b/lib/lsf/dispatch_test.go
index 641453e54..c044df09f 100644
--- a/lib/lsf/dispatch_test.go
+++ b/lib/lsf/dispatch_test.go
@@ -6,6 +6,7 @@ package lsf
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"math/rand"
 	"os/exec"
@@ -29,7 +30,8 @@ func Test(t *testing.T) {
 var _ = check.Suite(&suite{})
 
 type suite struct {
-	disp *dispatcher
+	disp     *dispatcher
+	crTooBig arvados.ContainerRequest
 }
 
 func (s *suite) TearDownTest(c *check.C) {
@@ -46,6 +48,22 @@ func (s *suite) SetUpTest(c *check.C) {
 	s.disp.lsfcli.stubCommand = func(string, ...string) *exec.Cmd {
 		return exec.Command("bash", "-c", "echo >&2 unimplemented stub; false")
 	}
+	err = arvados.NewClientFromEnv().RequestAndDecode(&s.crTooBig, "POST", "arvados/v1/container_requests", nil, map[string]interface{}{
+		"container_request": map[string]interface{}{
+			"runtime_constraints": arvados.RuntimeConstraints{
+				RAM:   1000000000000,
+				VCPUs: 1,
+			},
+			"container_image":     arvadostest.DockerImage112PDH,
+			"command":             []string{"sleep", "1"},
+			"mounts":              map[string]arvados.Mount{"/mnt/out": {Kind: "tmp", Capacity: 1000}},
+			"output_path":         "/mnt/out",
+			"state":               arvados.ContainerRequestStateCommitted,
+			"priority":            1,
+			"container_count_max": 1,
+		},
+	})
+	c.Assert(err, check.IsNil)
 }
 
 type lsfstub struct {
@@ -82,7 +100,10 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
 					"-J", arvadostest.LockedContainerUUID,
 					"-n", "4",
 					"-D", "11701MB",
-					"-R", "rusage[mem=11701MB:tmp=0MB] span[hosts=1]"})
+					"-R", "rusage[mem=11701MB:tmp=0MB] span[hosts=1]",
+					"-R", "select[mem>=11701MB]",
+					"-R", "select[tmp>=0MB]",
+					"-R", "select[ncpus>=4]"})
 				mtx.Lock()
 				fakejobq[nextjobid] = args[1]
 				nextjobid++
@@ -92,7 +113,23 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
 					"-J", arvadostest.QueuedContainerUUID,
 					"-n", "4",
 					"-D", "11701MB",
-					"-R", "rusage[mem=11701MB:tmp=45777MB] span[hosts=1]"})
+					"-R", "rusage[mem=11701MB:tmp=45777MB] span[hosts=1]",
+					"-R", "select[mem>=11701MB]",
+					"-R", "select[tmp>=45777MB]",
+					"-R", "select[ncpus>=4]"})
+				mtx.Lock()
+				fakejobq[nextjobid] = args[1]
+				nextjobid++
+				mtx.Unlock()
+			case s.crTooBig.ContainerUUID:
+				c.Check(args, check.DeepEquals, []string{
+					"-J", s.crTooBig.ContainerUUID,
+					"-n", "1",
+					"-D", "954187MB",
+					"-R", "rusage[mem=954187MB:tmp=256MB] span[hosts=1]",
+					"-R", "select[mem>=954187MB]",
+					"-R", "select[tmp>=256MB]",
+					"-R", "select[ncpus>=1]"})
 				mtx.Lock()
 				fakejobq[nextjobid] = args[1]
 				nextjobid++
@@ -103,13 +140,31 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
 			}
 			return exec.Command("echo", "submitted job")
 		case "bjobs":
-			c.Check(args, check.DeepEquals, []string{"-u", "all", "-noheader", "-o", "jobid stat job_name:30"})
-			out := ""
+			c.Check(args, check.DeepEquals, []string{"-u", "all", "-o", "jobid stat job_name pend_reason", "-json"})
+			var records []map[string]interface{}
 			for jobid, uuid := range fakejobq {
-				out += fmt.Sprintf(`%d %s %s\n`, jobid, "RUN", uuid)
+				stat, reason := "RUN", ""
+				if uuid == s.crTooBig.ContainerUUID {
+					// The real bjobs output includes a trailing ';' here:
+					stat, reason = "PEND", "There are no suitable hosts for the job;"
+				}
+				records = append(records, map[string]interface{}{
+					"JOBID":       fmt.Sprintf("%d", jobid),
+					"STAT":        stat,
+					"JOB_NAME":    uuid,
+					"PEND_REASON": reason,
+				})
 			}
-			c.Logf("bjobs out: %q", out)
-			return exec.Command("printf", out)
+			out, err := json.Marshal(map[string]interface{}{
+				"COMMAND": "bjobs",
+				"JOBS":    len(fakejobq),
+				"RECORDS": records,
+			})
+			if err != nil {
+				panic(err)
+			}
+			c.Logf("bjobs out: %s", out)
+			return exec.Command("printf", string(out))
 		case "bkill":
 			killid, _ := strconv.Atoi(args[0])
 			if uuid, ok := fakejobq[killid]; !ok {
@@ -137,6 +192,7 @@ func (s *suite) TestSubmit(c *check.C) {
 		sudoUser:  s.disp.Cluster.Containers.LSF.BsubSudoUser,
 	}.stubCommand(s, c)
 	s.disp.Start()
+
 	deadline := time.Now().Add(20 * time.Second)
 	for range time.NewTicker(time.Second).C {
 		if time.Now().After(deadline) {
@@ -144,23 +200,37 @@ func (s *suite) TestSubmit(c *check.C) {
 			break
 		}
 		// "queuedcontainer" should be running
-		if _, ok := s.disp.lsfqueue.JobID(arvadostest.QueuedContainerUUID); !ok {
+		if _, ok := s.disp.lsfqueue.Lookup(arvadostest.QueuedContainerUUID); !ok {
 			continue
 		}
 		// "lockedcontainer" should be cancelled because it
 		// has priority 0 (no matching container requests)
-		if _, ok := s.disp.lsfqueue.JobID(arvadostest.LockedContainerUUID); ok {
+		if _, ok := s.disp.lsfqueue.Lookup(arvadostest.LockedContainerUUID); ok {
+			continue
+		}
+		// "crTooBig" should be cancelled because lsf stub
+		// reports there is no suitable instance type
+		if _, ok := s.disp.lsfqueue.Lookup(s.crTooBig.ContainerUUID); ok {
 			continue
 		}
 		var ctr arvados.Container
 		if err := s.disp.arvDispatcher.Arv.Get("containers", arvadostest.LockedContainerUUID, nil, &ctr); err != nil {
 			c.Logf("error getting container state for %s: %s", arvadostest.LockedContainerUUID, err)
 			continue
-		}
-		if ctr.State != arvados.ContainerStateQueued {
+		} else if ctr.State != arvados.ContainerStateQueued {
 			c.Logf("LockedContainer is not in the LSF queue but its arvados record has not been updated to state==Queued (state is %q)", ctr.State)
 			continue
 		}
+
+		if err := s.disp.arvDispatcher.Arv.Get("containers", s.crTooBig.ContainerUUID, nil, &ctr); err != nil {
+			c.Logf("error getting container state for %s: %s", s.crTooBig.ContainerUUID, err)
+			continue
+		} else if ctr.State != arvados.ContainerStateCancelled {
+			c.Logf("container %s is not in the LSF queue but its arvados record has not been updated to state==Cancelled (state is %q)", s.crTooBig.ContainerUUID, ctr.State)
+			continue
+		} else {
+			c.Check(ctr.RuntimeStatus["error"], check.Equals, "There are no suitable hosts for the job;")
+		}
 		c.Log("reached desired state")
 		break
 	}
diff --git a/lib/lsf/lsfcli.go b/lib/lsf/lsfcli.go
index 9d712ee97..d17559568 100644
--- a/lib/lsf/lsfcli.go
+++ b/lib/lsf/lsfcli.go
@@ -6,6 +6,7 @@ package lsf
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"os"
 	"os/exec"
@@ -16,9 +17,10 @@ import (
 )
 
 type bjobsEntry struct {
-	id   int
-	name string
-	stat string
+	ID         string `json:"JOBID"`
+	Name       string `json:"JOB_NAME"`
+	Stat       string `json:"STAT"`
+	PendReason string `json:"PEND_REASON"`
 }
 
 type lsfcli struct {
@@ -53,29 +55,21 @@ func (cli lsfcli) Bsub(script []byte, args []string, arv *arvados.Client) error
 
 func (cli lsfcli) Bjobs() ([]bjobsEntry, error) {
 	cli.logger.Debugf("Bjobs()")
-	cmd := cli.command("bjobs", "-u", "all", "-noheader", "-o", "jobid stat job_name:30")
+	cmd := cli.command("bjobs", "-u", "all", "-o", "jobid stat job_name pend_reason", "-json")
 	buf, err := cmd.Output()
 	if err != nil {
 		return nil, errWithStderr(err)
 	}
-	var bjobs []bjobsEntry
-	for _, line := range strings.Split(string(buf), "\n") {
-		if line == "" {
-			continue
-		}
-		var ent bjobsEntry
-		if _, err := fmt.Sscan(line, &ent.id, &ent.stat, &ent.name); err != nil {
-			cli.logger.Warnf("ignoring unparsed line in bjobs output: %q", line)
-			continue
-		}
-		bjobs = append(bjobs, ent)
+	var resp struct {
+		Records []bjobsEntry `json:"RECORDS"`
 	}
-	return bjobs, nil
+	err = json.Unmarshal(buf, &resp)
+	return resp.Records, err
 }
 
-func (cli lsfcli) Bkill(id int) error {
-	cli.logger.Infof("Bkill(%d)", id)
-	cmd := cli.command("bkill", fmt.Sprintf("%d", id))
+func (cli lsfcli) Bkill(id string) error {
+	cli.logger.Infof("Bkill(%s)", id)
+	cmd := cli.command("bkill", id)
 	buf, err := cmd.CombinedOutput()
 	if err == nil || strings.Index(string(buf), "already finished") >= 0 {
 		return nil
diff --git a/lib/lsf/lsfqueue.go b/lib/lsf/lsfqueue.go
index 3c4fc4cb8..3ed4d0c18 100644
--- a/lib/lsf/lsfqueue.go
+++ b/lib/lsf/lsfqueue.go
@@ -23,12 +23,12 @@ type lsfqueue struct {
 	latest    map[string]bjobsEntry
 }
 
-// JobID waits for the next queue update (so even a job that was only
+// Lookup waits for the next queue update (so even a job that was only
 // submitted a nanosecond ago will show up) and then returns the LSF
-// job ID corresponding to the given container UUID.
-func (q *lsfqueue) JobID(uuid string) (int, bool) {
+// queue information corresponding to the given container UUID.
+func (q *lsfqueue) Lookup(uuid string) (bjobsEntry, bool) {
 	ent, ok := q.getNext()[uuid]
-	return ent.id, ok
+	return ent, ok
 }
 
 // All waits for the next queue update, then returns the names of all
@@ -94,7 +94,7 @@ func (q *lsfqueue) init() {
 			}
 			next := make(map[string]bjobsEntry, len(ents))
 			for _, ent := range ents {
-				next[ent.name] = ent
+				next[ent.Name] = ent
 			}
 			// Replace q.latest and notify all the
 			// goroutines that the "next update" they

commit 771804a86a5ac53be1142735995dbec6f6949289
Author: Tom Clegg <tom at curii.com>
Date:   Thu Nov 18 15:01:06 2021 -0500

    Merge branch '18339-sweep-trash-lock'
    
    fixes #18339
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index d3329f783..39d9b79a7 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -39,6 +39,10 @@ h2(#v2_3_1). v2.3.1 (2021-11-19)
 
 "previous: Upgrading to 2.3.0":#v2_3_0
 
+h3. Previously trashed role groups will be deleted
+
+Due to a bug in previous versions, the @DELETE@ operation on a role group caused the group to be flagged as trash in the database, but continue to grant permissions regardless. After upgrading, any role groups that had been trashed this way will be deleted. This might surprise some users if they were relying on permissions that were still in effect due to this bug. Future @DELETE@ operations on a role group will immediately delete the group and revoke the associated permissions.
+
 h3. Users are visible to other users by default
 
 When a new user is set up (either via @AutoSetupNewUsers@ config or via Workbench admin interface) the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false at .
diff --git a/lib/controller/auth_test.go b/lib/controller/auth_test.go
index 175241146..5d477a766 100644
--- a/lib/controller/auth_test.go
+++ b/lib/controller/auth_test.go
@@ -98,7 +98,7 @@ func (s *AuthSuite) SetUpTest(c *check.C) {
 	cluster.Login.OpenIDConnect.AcceptAccessToken = true
 	cluster.Login.OpenIDConnect.AcceptAccessTokenScope = ""
 
-	s.testHandler = &Handler{Cluster: cluster}
+	s.testHandler = &Handler{Cluster: cluster, BackgroundContext: ctxlog.Context(context.Background(), s.log)}
 	s.testServer = newServerFromIntegrationTestEnv(c)
 	s.testServer.Server.BaseContext = func(net.Listener) context.Context {
 		return ctxlog.Context(context.Background(), s.log)
diff --git a/lib/controller/cmd.go b/lib/controller/cmd.go
index 7ab7f5305..96972251a 100644
--- a/lib/controller/cmd.go
+++ b/lib/controller/cmd.go
@@ -16,6 +16,6 @@ import (
 // Command starts a controller service. See cmd/arvados-server/cmd.go
 var Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)
 
-func newHandler(_ context.Context, cluster *arvados.Cluster, _ string, _ *prometheus.Registry) service.Handler {
-	return &Handler{Cluster: cluster}
+func newHandler(ctx context.Context, cluster *arvados.Cluster, _ string, _ *prometheus.Registry) service.Handler {
+	return &Handler{Cluster: cluster, BackgroundContext: ctx}
 }
diff --git a/lib/controller/dblock/dblock.go b/lib/controller/dblock/dblock.go
new file mode 100644
index 000000000..b0d348870
--- /dev/null
+++ b/lib/controller/dblock/dblock.go
@@ -0,0 +1,101 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package dblock
+
+import (
+	"context"
+	"database/sql"
+	"sync"
+	"time"
+
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	"github.com/jmoiron/sqlx"
+)
+
+var (
+	TrashSweep = &DBLocker{key: 10001}
+	retryDelay = 5 * time.Second
+)
+
+// DBLocker uses pg_advisory_lock to maintain a cluster-wide lock for
+// a long-running task like "do X every N seconds".
+type DBLocker struct {
+	key   int
+	mtx   sync.Mutex
+	ctx   context.Context
+	getdb func(context.Context) (*sqlx.DB, error)
+	conn  *sql.Conn // != nil if advisory lock has been acquired
+}
+
+// Lock acquires the advisory lock, waiting/reconnecting if needed.
+func (dbl *DBLocker) Lock(ctx context.Context, getdb func(context.Context) (*sqlx.DB, error)) {
+	logger := ctxlog.FromContext(ctx)
+	for ; ; time.Sleep(retryDelay) {
+		dbl.mtx.Lock()
+		if dbl.conn != nil {
+			// Already locked by another caller in this
+			// process. Wait for them to release.
+			dbl.mtx.Unlock()
+			continue
+		}
+		db, err := getdb(ctx)
+		if err != nil {
+			logger.WithError(err).Infof("error getting database pool")
+			dbl.mtx.Unlock()
+			continue
+		}
+		conn, err := db.Conn(ctx)
+		if err != nil {
+			logger.WithError(err).Info("error getting database connection")
+			dbl.mtx.Unlock()
+			continue
+		}
+		_, err = conn.ExecContext(ctx, `SELECT pg_advisory_lock($1)`, dbl.key)
+		if err != nil {
+			logger.WithError(err).Infof("error getting pg_advisory_lock %d", dbl.key)
+			conn.Close()
+			dbl.mtx.Unlock()
+			continue
+		}
+		logger.Debugf("acquired pg_advisory_lock %d", dbl.key)
+		dbl.ctx, dbl.getdb, dbl.conn = ctx, getdb, conn
+		dbl.mtx.Unlock()
+		return
+	}
+}
+
+// Check confirms that the lock is still active (i.e., the session is
+// still alive), and re-acquires if needed. Panics if Lock is not
+// acquired first.
+func (dbl *DBLocker) Check() {
+	dbl.mtx.Lock()
+	err := dbl.conn.PingContext(dbl.ctx)
+	if err == nil {
+		ctxlog.FromContext(dbl.ctx).Debugf("pg_advisory_lock %d connection still alive", dbl.key)
+		dbl.mtx.Unlock()
+		return
+	}
+	ctxlog.FromContext(dbl.ctx).WithError(err).Info("database connection ping failed")
+	dbl.conn.Close()
+	dbl.conn = nil
+	ctx, getdb := dbl.ctx, dbl.getdb
+	dbl.mtx.Unlock()
+	dbl.Lock(ctx, getdb)
+}
+
+func (dbl *DBLocker) Unlock() {
+	dbl.mtx.Lock()
+	defer dbl.mtx.Unlock()
+	if dbl.conn != nil {
+		_, err := dbl.conn.ExecContext(context.Background(), `SELECT pg_advisory_unlock($1)`, dbl.key)
+		if err != nil {
+			ctxlog.FromContext(dbl.ctx).WithError(err).Infof("error releasing pg_advisory_lock %d", dbl.key)
+		} else {
+			ctxlog.FromContext(dbl.ctx).Debugf("released pg_advisory_lock %d", dbl.key)
+		}
+		dbl.conn.Close()
+		dbl.conn = nil
+	}
+}
diff --git a/lib/controller/federation/conn.go b/lib/controller/federation/conn.go
index d1bf473d7..d4155da10 100644
--- a/lib/controller/federation/conn.go
+++ b/lib/controller/federation/conn.go
@@ -525,6 +525,10 @@ func (conn *Conn) SpecimenDelete(ctx context.Context, options arvados.DeleteOpti
 	return conn.chooseBackend(options.UUID).SpecimenDelete(ctx, options)
 }
 
+func (conn *Conn) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {
+	return conn.local.SysTrashSweep(ctx, options)
+}
+
 var userAttrsCachedFromLoginCluster = map[string]bool{
 	"created_at":  true,
 	"email":       true,
diff --git a/lib/controller/federation_test.go b/lib/controller/federation_test.go
index 211c76198..eb398695b 100644
--- a/lib/controller/federation_test.go
+++ b/lib/controller/federation_test.go
@@ -70,7 +70,7 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
 	cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour * 24 * 14)
 	arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "http://localhost:1/")
 	arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost:/")
-	s.testHandler = &Handler{Cluster: cluster}
+	s.testHandler = &Handler{Cluster: cluster, BackgroundContext: ctxlog.Context(context.Background(), s.log)}
 	s.testServer = newServerFromIntegrationTestEnv(c)
 	s.testServer.Server.BaseContext = func(net.Listener) context.Context {
 		return ctxlog.Context(context.Background(), s.log)
diff --git a/lib/controller/handler.go b/lib/controller/handler.go
index b51d90911..965ba040e 100644
--- a/lib/controller/handler.go
+++ b/lib/controller/handler.go
@@ -32,9 +32,11 @@ import (
 )
 
 type Handler struct {
-	Cluster *arvados.Cluster
+	Cluster           *arvados.Cluster
+	BackgroundContext context.Context
 
 	setupOnce      sync.Once
+	federation     *federation.Conn
 	handlerStack   http.Handler
 	proxy          *proxy
 	secureClient   *http.Client
@@ -103,7 +105,8 @@ func (h *Handler) setup() {
 	healthFuncs := make(map[string]health.Func)
 
 	oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
-	rtr := router.New(federation.New(h.Cluster, &healthFuncs), router.Config{
+	h.federation = federation.New(h.Cluster, &healthFuncs)
+	rtr := router.New(h.federation, router.Config{
 		MaxRequestSize: h.Cluster.API.MaxRequestSize,
 		WrapCalls:      api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls),
 	})
@@ -152,6 +155,8 @@ func (h *Handler) setup() {
 	h.proxy = &proxy{
 		Name: "arvados-controller",
 	}
+
+	go h.trashSweepWorker()
 }
 
 var errDBConnection = errors.New("database connection error")
diff --git a/lib/controller/handler_test.go b/lib/controller/handler_test.go
index f854079f9..a456627c0 100644
--- a/lib/controller/handler_test.go
+++ b/lib/controller/handler_test.go
@@ -35,7 +35,7 @@ var _ = check.Suite(&HandlerSuite{})
 
 type HandlerSuite struct {
 	cluster *arvados.Cluster
-	handler http.Handler
+	handler *Handler
 	ctx     context.Context
 	cancel  context.CancelFunc
 }
@@ -51,7 +51,7 @@ func (s *HandlerSuite) SetUpTest(c *check.C) {
 	s.cluster.TLS.Insecure = true
 	arvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
 	arvadostest.SetServiceURL(&s.cluster.Services.Controller, "http://localhost:/")
-	s.handler = newHandler(s.ctx, s.cluster, "", prometheus.NewRegistry())
+	s.handler = newHandler(s.ctx, s.cluster, "", prometheus.NewRegistry()).(*Handler)
 }
 
 func (s *HandlerSuite) TearDownTest(c *check.C) {
@@ -276,7 +276,7 @@ func (s *HandlerSuite) TestLogoutGoogle(c *check.C) {
 
 func (s *HandlerSuite) TestValidateV1APIToken(c *check.C) {
 	req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
-	user, ok, err := s.handler.(*Handler).validateAPItoken(req, arvadostest.ActiveToken)
+	user, ok, err := s.handler.validateAPItoken(req, arvadostest.ActiveToken)
 	c.Assert(err, check.IsNil)
 	c.Check(ok, check.Equals, true)
 	c.Check(user.Authorization.UUID, check.Equals, arvadostest.ActiveTokenUUID)
@@ -287,7 +287,7 @@ func (s *HandlerSuite) TestValidateV1APIToken(c *check.C) {
 
 func (s *HandlerSuite) TestValidateV2APIToken(c *check.C) {
 	req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
-	user, ok, err := s.handler.(*Handler).validateAPItoken(req, arvadostest.ActiveTokenV2)
+	user, ok, err := s.handler.validateAPItoken(req, arvadostest.ActiveTokenV2)
 	c.Assert(err, check.IsNil)
 	c.Check(ok, check.Equals, true)
 	c.Check(user.Authorization.UUID, check.Equals, arvadostest.ActiveTokenUUID)
@@ -319,11 +319,11 @@ func (s *HandlerSuite) TestValidateRemoteToken(c *check.C) {
 
 func (s *HandlerSuite) TestCreateAPIToken(c *check.C) {
 	req := httptest.NewRequest("GET", "/arvados/v1/users/current", nil)
-	auth, err := s.handler.(*Handler).createAPItoken(req, arvadostest.ActiveUserUUID, nil)
+	auth, err := s.handler.createAPItoken(req, arvadostest.ActiveUserUUID, nil)
 	c.Assert(err, check.IsNil)
 	c.Check(auth.Scopes, check.DeepEquals, []string{"all"})
 
-	user, ok, err := s.handler.(*Handler).validateAPItoken(req, auth.TokenV2())
+	user, ok, err := s.handler.validateAPItoken(req, auth.TokenV2())
 	c.Assert(err, check.IsNil)
 	c.Check(ok, check.Equals, true)
 	c.Check(user.Authorization.UUID, check.Equals, auth.UUID)
@@ -430,3 +430,30 @@ func (s *HandlerSuite) TestRedactRailsAPIHostFromErrors(c *check.C) {
 	c.Check(jresp.Errors[0], check.Matches, `.*//railsapi\.internal/arvados/v1/collections/.*: 404 Not Found.*`)
 	c.Check(jresp.Errors[0], check.Not(check.Matches), `(?ms).*127.0.0.1.*`)
 }
+
+func (s *HandlerSuite) TestTrashSweep(c *check.C) {
+	s.cluster.SystemRootToken = arvadostest.SystemRootToken
+	s.cluster.Collections.TrashSweepInterval = arvados.Duration(time.Second / 10)
+	s.handler.CheckHealth()
+	ctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+	coll, err := s.handler.federation.CollectionCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{"name": "test trash sweep"}, EnsureUniqueName: true})
+	c.Assert(err, check.IsNil)
+	defer s.handler.federation.CollectionDelete(ctx, arvados.DeleteOptions{UUID: coll.UUID})
+	db, err := s.handler.db(s.ctx)
+	c.Assert(err, check.IsNil)
+	_, err = db.ExecContext(s.ctx, `update collections set trash_at = $1, delete_at = $2 where uuid = $3`, time.Now().UTC().Add(time.Second/10), time.Now().UTC().Add(time.Hour), coll.UUID)
+	c.Assert(err, check.IsNil)
+	deadline := time.Now().Add(5 * time.Second)
+	for {
+		if time.Now().After(deadline) {
+			c.Log("timed out")
+			c.FailNow()
+		}
+		updated, err := s.handler.federation.CollectionGet(ctx, arvados.GetOptions{UUID: coll.UUID, IncludeTrash: true})
+		c.Assert(err, check.IsNil)
+		if updated.IsTrashed {
+			break
+		}
+		time.Sleep(time.Second / 10)
+	}
+}
diff --git a/lib/controller/rpc/conn.go b/lib/controller/rpc/conn.go
index 25f47bc3b..736ef711e 100644
--- a/lib/controller/rpc/conn.go
+++ b/lib/controller/rpc/conn.go
@@ -572,6 +572,13 @@ func (conn *Conn) SpecimenDelete(ctx context.Context, options arvados.DeleteOpti
 	return resp, err
 }
 
+func (conn *Conn) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {
+	ep := arvados.EndpointSysTrashSweep
+	var resp struct{}
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
 func (conn *Conn) UserCreate(ctx context.Context, options arvados.CreateOptions) (arvados.User, error) {
 	ep := arvados.EndpointUserCreate
 	var resp arvados.User
diff --git a/lib/controller/server_test.go b/lib/controller/server_test.go
index b2b3365a2..4f3d4a568 100644
--- a/lib/controller/server_test.go
+++ b/lib/controller/server_test.go
@@ -35,11 +35,14 @@ func integrationTestCluster() *arvados.Cluster {
 // provided by the integration-testing environment.
 func newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {
 	log := ctxlog.TestLogger(c)
-
-	handler := &Handler{Cluster: &arvados.Cluster{
-		ClusterID:  "zzzzz",
-		PostgreSQL: integrationTestCluster().PostgreSQL,
-	}}
+	ctx := ctxlog.Context(context.Background(), log)
+	handler := &Handler{
+		Cluster: &arvados.Cluster{
+			ClusterID:  "zzzzz",
+			PostgreSQL: integrationTestCluster().PostgreSQL,
+		},
+		BackgroundContext: ctx,
+	}
 	handler.Cluster.TLS.Insecure = true
 	handler.Cluster.Collections.BlobSigning = true
 	handler.Cluster.Collections.BlobSigningKey = arvadostest.BlobSigningKey
@@ -49,10 +52,8 @@ func newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {
 
 	srv := &httpserver.Server{
 		Server: http.Server{
-			BaseContext: func(net.Listener) context.Context {
-				return ctxlog.Context(context.Background(), log)
-			},
-			Handler: httpserver.AddRequestIDs(httpserver.LogRequests(handler)),
+			BaseContext: func(net.Listener) context.Context { return ctx },
+			Handler:     httpserver.AddRequestIDs(httpserver.LogRequests(handler)),
 		},
 		Addr: ":",
 	}
diff --git a/lib/controller/trash.go b/lib/controller/trash.go
new file mode 100644
index 000000000..551b2f92b
--- /dev/null
+++ b/lib/controller/trash.go
@@ -0,0 +1,33 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+import (
+	"time"
+
+	"git.arvados.org/arvados.git/lib/controller/dblock"
+	"git.arvados.org/arvados.git/sdk/go/auth"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+)
+
+func (h *Handler) trashSweepWorker() {
+	sleep := h.Cluster.Collections.TrashSweepInterval.Duration()
+	logger := ctxlog.FromContext(h.BackgroundContext).WithField("worker", "trash sweep")
+	ctx := ctxlog.Context(h.BackgroundContext, logger)
+	if sleep <= 0 {
+		logger.Debugf("Collections.TrashSweepInterval is %v, not running worker", sleep)
+		return
+	}
+	dblock.TrashSweep.Lock(ctx, h.db)
+	defer dblock.TrashSweep.Unlock()
+	for time.Sleep(sleep); ctx.Err() == nil; time.Sleep(sleep) {
+		dblock.TrashSweep.Check()
+		ctx := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{h.Cluster.SystemRootToken}})
+		_, err := h.federation.SysTrashSweep(ctx, struct{}{})
+		if err != nil {
+			logger.WithError(err).Info("trash sweep failed")
+		}
+	}
+}
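
The worker above simply re-runs the Rails trash sweep on a timer, holding the TrashSweep database lock and authenticating with the cluster's SystemRootToken. For reference, the same sweep can be triggered by hand against the new endpoint; the snippet below is a minimal sketch (not part of this commit) and assumes ARVADOS_API_HOST and an admin-scoped token in ARVADOS_API_TOKEN:

    # Manually trigger a trash sweep (illustrative sketch only).
    import os
    import urllib.request

    req = urllib.request.Request(
        "https://%s/sys/trash_sweep" % os.environ["ARVADOS_API_HOST"],
        method="POST",
        headers={"Authorization": "Bearer " + os.environ["ARVADOS_API_TOKEN"]},
    )
    with urllib.request.urlopen(req) as resp:
        # The Rails action ends with `head :no_content`, so a 204 response
        # means the sweep ran.
        print(resp.status)
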
diff --git a/sdk/go/arvados/api.go b/sdk/go/arvados/api.go
index 0fdc13d19..d4af0e7a8 100644
--- a/sdk/go/arvados/api.go
+++ b/sdk/go/arvados/api.go
@@ -68,6 +68,7 @@ var (
 	EndpointLinkGet                       = APIEndpoint{"GET", "arvados/v1/links/{uuid}", ""}
 	EndpointLinkList                      = APIEndpoint{"GET", "arvados/v1/links", ""}
 	EndpointLinkDelete                    = APIEndpoint{"DELETE", "arvados/v1/links/{uuid}", ""}
+	EndpointSysTrashSweep                 = APIEndpoint{"POST", "sys/trash_sweep", ""}
 	EndpointUserActivate                  = APIEndpoint{"POST", "arvados/v1/users/{uuid}/activate", ""}
 	EndpointUserCreate                    = APIEndpoint{"POST", "arvados/v1/users", "user"}
 	EndpointUserCurrent                   = APIEndpoint{"GET", "arvados/v1/users/current", ""}
@@ -269,6 +270,7 @@ type API interface {
 	SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
 	SpecimenList(ctx context.Context, options ListOptions) (SpecimenList, error)
 	SpecimenDelete(ctx context.Context, options DeleteOptions) (Specimen, error)
+	SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error)
 	UserCreate(ctx context.Context, options CreateOptions) (User, error)
 	UserUpdate(ctx context.Context, options UpdateOptions) (User, error)
 	UserMerge(ctx context.Context, options UserMergeOptions) (User, error)
diff --git a/sdk/go/arvados/client.go b/sdk/go/arvados/client.go
index 13bb3bf80..5ec828667 100644
--- a/sdk/go/arvados/client.go
+++ b/sdk/go/arvados/client.go
@@ -217,6 +217,8 @@ func (c *Client) DoAndDecode(dst interface{}, req *http.Request) error {
 		return err
 	}
 	switch {
+	case resp.StatusCode == http.StatusNoContent:
+		return nil
 	case resp.StatusCode == http.StatusOK && dst == nil:
 		return nil
 	case resp.StatusCode == http.StatusOK:
diff --git a/sdk/go/arvadostest/api.go b/sdk/go/arvadostest/api.go
index 0af477125..6990a3fdf 100644
--- a/sdk/go/arvadostest/api.go
+++ b/sdk/go/arvadostest/api.go
@@ -209,6 +209,10 @@ func (as *APIStub) SpecimenDelete(ctx context.Context, options arvados.DeleteOpt
 	as.appendCall(ctx, as.SpecimenDelete, options)
 	return arvados.Specimen{}, as.Error
 }
+func (as *APIStub) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {
+	as.appendCall(ctx, as.SysTrashSweep, options)
+	return struct{}{}, as.Error
+}
 func (as *APIStub) UserCreate(ctx context.Context, options arvados.CreateOptions) (arvados.User, error) {
 	as.appendCall(ctx, as.UserCreate, options)
 	return arvados.User{}, as.Error
diff --git a/sdk/go/arvadostest/api_test.go b/sdk/go/arvadostest/api_test.go
new file mode 100644
index 000000000..798d03544
--- /dev/null
+++ b/sdk/go/arvadostest/api_test.go
@@ -0,0 +1,10 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvadostest
+
+import "git.arvados.org/arvados.git/sdk/go/arvados"
+
+// Test that *APIStub implements arvados.API
+var _ arvados.API = &APIStub{}
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index c1d4b74d6..59ac639ba 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -427,6 +427,27 @@ class Arvados::V1::SchemaController < ApplicationController
         }
       }
 
+      discovery[:resources]['sys'] = {
+        methods: {
+          get: {
+            id: "arvados.sys.trash_sweep",
+            path: "sys/trash_sweep",
+            httpMethod: "POST",
+            description: "apply scheduled trash and delete operations",
+            parameters: {
+            },
+            parameterOrder: [
+            ],
+            response: {
+            },
+            scopes: [
+              "https://api.arvados.org/auth/arvados",
+              "https://api.arvados.org/auth/arvados.readonly"
+            ]
+          },
+        }
+      }
+
       Rails.configuration.API.DisabledAPIs.each do |method, _|
         ctrl, action = method.to_s.split('.', 2)
         discovery[:resources][ctrl][:methods].delete(action.to_sym)
diff --git a/services/api/lib/sweep_trashed_objects.rb b/services/api/app/controllers/sys_controller.rb
similarity index 55%
rename from services/api/lib/sweep_trashed_objects.rb
rename to services/api/app/controllers/sys_controller.rb
index c09896567..a67b124bd 100644
--- a/services/api/lib/sweep_trashed_objects.rb
+++ b/services/api/app/controllers/sys_controller.rb
@@ -2,33 +2,12 @@
 #
 # SPDX-License-Identifier: AGPL-3.0
 
-require 'current_api_client'
+class SysController < ApplicationController
+  skip_before_action :find_object_by_uuid
+  skip_before_action :render_404_if_no_object
+  before_action :admin_required
 
-module SweepTrashedObjects
-  extend CurrentApiClient
-
-  def self.delete_project_and_contents(p_uuid)
-    p = Group.find_by_uuid(p_uuid)
-    if !p || p.group_class != 'project'
-      raise "can't sweep group '#{p_uuid}', it may not exist or not be a project"
-    end
-    # First delete sub projects
-    Group.where({group_class: 'project', owner_uuid: p_uuid}).each do |sub_project|
-      delete_project_and_contents(sub_project.uuid)
-    end
-    # Next, iterate over all tables which have owner_uuid fields, with some
-    # exceptions, and delete records owned by this project
-    skipped_classes = ['Group', 'User']
-    ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
-      if !skipped_classes.include?(klass.name) && klass.columns.collect(&:name).include?('owner_uuid')
-        klass.where({owner_uuid: p_uuid}).destroy_all
-      end
-    end
-    # Finally delete the project itself
-    p.destroy
-  end
-
-  def self.sweep_now
+  def trash_sweep
     act_as_system_user do
       # Sweep trashed collections
       Collection.
@@ -38,45 +17,43 @@ module SweepTrashedObjects
         where('is_trashed = false and trash_at < statement_timestamp()').
         update_all('is_trashed = true')
 
-      # Sweep trashed projects and their contents
+      # Sweep trashed projects and their contents (as well as role
+      # groups that were trashed before #18340 when that was
+      # disallowed)
       Group.
-        where({group_class: 'project'}).
         where('delete_at is not null and delete_at < statement_timestamp()').each do |project|
           delete_project_and_contents(project.uuid)
       end
       Group.
-        where({group_class: 'project'}).
         where('is_trashed = false and trash_at < statement_timestamp()').
         update_all('is_trashed = true')
 
       # Sweep expired tokens
       ActiveRecord::Base.connection.execute("DELETE from api_client_authorizations where expires_at <= statement_timestamp()")
     end
+    head :no_content
   end
 
-  def self.sweep_if_stale
-    return if Rails.configuration.Collections.TrashSweepInterval <= 0
-    exp = Rails.configuration.Collections.TrashSweepInterval.seconds
-    need = false
-    Rails.cache.fetch('SweepTrashedObjects', expires_in: exp) do
-      need = true
+  protected
+
+  def delete_project_and_contents(p_uuid)
+    p = Group.find_by_uuid(p_uuid)
+    if !p
+      raise "can't sweep group '#{p_uuid}', it may not exist"
+    end
+    # First delete sub projects
+    Group.where({group_class: 'project', owner_uuid: p_uuid}).each do |sub_project|
+      delete_project_and_contents(sub_project.uuid)
     end
-    if need
-      Thread.new do
-        Thread.current.abort_on_exception = false
-        begin
-          sweep_now
-        rescue => e
-          Rails.logger.error "#{e.class}: #{e}\n#{e.backtrace.join("\n\t")}"
-        ensure
-          # Rails 5.1+ makes test threads share a database connection, so we can't
-          # close a connection shared with other threads.
-          # https://github.com/rails/rails/commit/deba47799ff905f778e0c98a015789a1327d5087
-          if Rails.env != "test"
-            ActiveRecord::Base.connection.close
-          end
-        end
+    # Next, iterate over all tables which have owner_uuid fields, with some
+    # exceptions, and delete records owned by this project
+    skipped_classes = ['Group', 'User']
+    ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|
+      if !skipped_classes.include?(klass.name) && klass.columns.collect(&:name).include?('owner_uuid')
+        klass.where({owner_uuid: p_uuid}).destroy_all
       end
     end
+    # Finally delete the project itself
+    p.destroy
   end
 end
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index a98cde444..b4660dbd3 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -3,7 +3,6 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'arvados/keep'
-require 'sweep_trashed_objects'
 require 'trashable'
 
 class Collection < ArvadosModel
@@ -616,11 +615,6 @@ class Collection < ArvadosModel
     super - ["manifest_text", "storage_classes_desired", "storage_classes_confirmed", "current_version_uuid"]
   end
 
-  def self.where *args
-    SweepTrashedObjects.sweep_if_stale
-    super
-  end
-
   protected
 
   # Although the defaults for these columns is already set up on the schema,
diff --git a/services/api/config/routes.rb b/services/api/config/routes.rb
index 738426b1d..98f5788d6 100644
--- a/services/api/config/routes.rb
+++ b/services/api/config/routes.rb
@@ -92,6 +92,8 @@ Rails.application.routes.draw do
     end
   end
 
+  post '/sys/trash_sweep', to: 'sys#trash_sweep'
+
   if Rails.env == 'test'
     post '/database/reset', to: 'database#reset'
   end
diff --git a/services/api/test/functional/sys_controller_test.rb b/services/api/test/functional/sys_controller_test.rb
new file mode 100644
index 000000000..e13d70298
--- /dev/null
+++ b/services/api/test/functional/sys_controller_test.rb
@@ -0,0 +1,135 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+require 'test_helper'
+
+class SysControllerTest < ActionController::TestCase
+  include CurrentApiClient
+  include DbCurrentTime
+
+  test "trash_sweep - delete expired tokens" do
+    assert_not_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)
+    authorize_with :admin
+    post :trash_sweep
+    assert_response :success
+    assert_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)
+  end
+
+  test "trash_sweep - fail with non-admin token" do
+    authorize_with :active
+    post :trash_sweep
+    assert_response 403
+  end
+
+  test "trash_sweep - move collections to trash" do
+    c = collections(:trashed_on_next_sweep)
+    refute_empty Collection.where('uuid=? and is_trashed=false', c.uuid)
+    assert_raises(ActiveRecord::RecordNotUnique) do
+      act_as_user users(:active) do
+        Collection.create!(owner_uuid: c.owner_uuid,
+                           name: c.name)
+      end
+    end
+    authorize_with :admin
+    post :trash_sweep
+    assert_response :success
+    c = Collection.where('uuid=? and is_trashed=true', c.uuid).first
+    assert c
+    act_as_user users(:active) do
+      assert Collection.create!(owner_uuid: c.owner_uuid,
+                                name: c.name)
+    end
+  end
+
+  test "trash_sweep - delete collections" do
+    uuid = 'zzzzz-4zz18-3u1p5umicfpqszp' # deleted_on_next_sweep
+    assert_not_empty Collection.where(uuid: uuid)
+    authorize_with :admin
+    post :trash_sweep
+    assert_response :success
+    assert_empty Collection.where(uuid: uuid)
+  end
+
+  test "trash_sweep - delete referring links" do
+    uuid = collections(:trashed_on_next_sweep).uuid
+    act_as_system_user do
+      assert_raises ActiveRecord::RecordInvalid do
+        # Cannot create because :trashed_on_next_sweep is already trashed
+        Link.create!(head_uuid: uuid,
+                     tail_uuid: system_user_uuid,
+                     link_class: 'whatever',
+                     name: 'something')
+      end
+
+      # Bump trash_at to now + 1 minute
+      Collection.where(uuid: uuid).
+        update(trash_at: db_current_time + (1).minute)
+
+      # Not considered trashed now
+      Link.create!(head_uuid: uuid,
+                   tail_uuid: system_user_uuid,
+                   link_class: 'whatever',
+                   name: 'something')
+    end
+    past = db_current_time
+    Collection.where(uuid: uuid).
+      update_all(is_trashed: true, trash_at: past, delete_at: past)
+    assert_not_empty Collection.where(uuid: uuid)
+    authorize_with :admin
+    post :trash_sweep
+    assert_response :success
+    assert_empty Collection.where(uuid: uuid)
+  end
+
+  test "trash_sweep - move projects to trash" do
+    p = groups(:trashed_on_next_sweep)
+    assert_empty Group.where('uuid=? and is_trashed=true', p.uuid)
+    authorize_with :admin
+    post :trash_sweep
+    assert_response :success
+    assert_not_empty Group.where('uuid=? and is_trashed=true', p.uuid)
+  end
+
+  test "trash_sweep - delete projects and their contents" do
+    g_foo = groups(:trashed_project)
+    g_bar = groups(:trashed_subproject)
+    g_baz = groups(:trashed_subproject3)
+    col = collections(:collection_in_trashed_subproject)
+    job = jobs(:job_in_trashed_project)
+    cr = container_requests(:cr_in_trashed_project)
+    # Save how many objects were before the sweep
+    user_nr_was = User.all.length
+    coll_nr_was = Collection.all.length
+    group_nr_was = Group.where('group_class<>?', 'project').length
+    project_nr_was = Group.where(group_class: 'project').length
+    cr_nr_was = ContainerRequest.all.length
+    job_nr_was = Job.all.length
+    assert_not_empty Group.where(uuid: g_foo.uuid)
+    assert_not_empty Group.where(uuid: g_bar.uuid)
+    assert_not_empty Group.where(uuid: g_baz.uuid)
+    assert_not_empty Collection.where(uuid: col.uuid)
+    assert_not_empty Job.where(uuid: job.uuid)
+    assert_not_empty ContainerRequest.where(uuid: cr.uuid)
+
+    authorize_with :admin
+    post :trash_sweep
+    assert_response :success
+
+    assert_empty Group.where(uuid: g_foo.uuid)
+    assert_empty Group.where(uuid: g_bar.uuid)
+    assert_empty Group.where(uuid: g_baz.uuid)
+    assert_empty Collection.where(uuid: col.uuid)
+    assert_empty Job.where(uuid: job.uuid)
+    assert_empty ContainerRequest.where(uuid: cr.uuid)
+    # No unwanted deletions should have happened
+    assert_equal user_nr_was, User.all.length
+    assert_equal coll_nr_was-2,        # collection_in_trashed_subproject
+                 Collection.all.length # & deleted_on_next_sweep collections
+    assert_equal group_nr_was, Group.where('group_class<>?', 'project').length
+    assert_equal project_nr_was-3, Group.where(group_class: 'project').length
+    assert_equal cr_nr_was-1, ContainerRequest.all.length
+    assert_equal job_nr_was-1, Job.all.length
+  end
+
+end
diff --git a/services/api/test/integration/errors_test.rb b/services/api/test/integration/errors_test.rb
index e3224f491..a2a1545ce 100644
--- a/services/api/test/integration/errors_test.rb
+++ b/services/api/test/integration/errors_test.rb
@@ -24,7 +24,7 @@ class ErrorsTest < ActionDispatch::IntegrationTest
       # Generally, new routes should appear under /arvados/v1/. If
       # they appear elsewhere, that might have been caused by default
       # rails generator behavior that we don't want.
-      assert_match(/^\/(|\*a|arvados\/v1\/.*|auth\/.*|login|logout|database\/reset|discovery\/.*|static\/.*|themes\/.*|assets|_health\/.*)(\(\.:format\))?$/,
+      assert_match(/^\/(|\*a|arvados\/v1\/.*|auth\/.*|login|logout|database\/reset|discovery\/.*|static\/.*|sys\/trash_sweep|themes\/.*|assets|_health\/.*)(\(\.:format\))?$/,
                    route.path.spec.to_s,
                    "Unexpected new route: #{route.path.spec}")
     end
diff --git a/services/api/test/unit/api_client_authorization_test.rb b/services/api/test/unit/api_client_authorization_test.rb
index fb90418b8..e043f8914 100644
--- a/services/api/test/unit/api_client_authorization_test.rb
+++ b/services/api/test/unit/api_client_authorization_test.rb
@@ -3,7 +3,6 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'test_helper'
-require 'sweep_trashed_objects'
 
 class ApiClientAuthorizationTest < ActiveSupport::TestCase
   include CurrentApiClient
@@ -20,12 +19,6 @@ class ApiClientAuthorizationTest < ActiveSupport::TestCase
     end
   end
 
-  test "delete expired in SweepTrashedObjects" do
-    assert_not_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)
-    SweepTrashedObjects.sweep_now
-    assert_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)
-  end
-
   test "accepts SystemRootToken" do
     assert_nil ApiClientAuthorization.validate(token: "xxxSystemRootTokenxxx")
 
diff --git a/services/api/test/unit/collection_test.rb b/services/api/test/unit/collection_test.rb
index de0f1d360..e7134a5be 100644
--- a/services/api/test/unit/collection_test.rb
+++ b/services/api/test/unit/collection_test.rb
@@ -3,7 +3,6 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 require 'test_helper'
-require 'sweep_trashed_objects'
 require 'fix_collection_versions_timestamps'
 
 class CollectionTest < ActiveSupport::TestCase
@@ -1058,60 +1057,6 @@ class CollectionTest < ActiveSupport::TestCase
     assert_includes(coll_uuids, collections(:docker_image).uuid)
   end
 
-  test "move collections to trash in SweepTrashedObjects" do
-    c = collections(:trashed_on_next_sweep)
-    refute_empty Collection.where('uuid=? and is_trashed=false', c.uuid)
-    assert_raises(ActiveRecord::RecordNotUnique) do
-      act_as_user users(:active) do
-        Collection.create!(owner_uuid: c.owner_uuid,
-                           name: c.name)
-      end
-    end
-    SweepTrashedObjects.sweep_now
-    c = Collection.where('uuid=? and is_trashed=true', c.uuid).first
-    assert c
-    act_as_user users(:active) do
-      assert Collection.create!(owner_uuid: c.owner_uuid,
-                                name: c.name)
-    end
-  end
-
-  test "delete collections in SweepTrashedObjects" do
-    uuid = 'zzzzz-4zz18-3u1p5umicfpqszp' # deleted_on_next_sweep
-    assert_not_empty Collection.where(uuid: uuid)
-    SweepTrashedObjects.sweep_now
-    assert_empty Collection.where(uuid: uuid)
-  end
-
-  test "delete referring links in SweepTrashedObjects" do
-    uuid = collections(:trashed_on_next_sweep).uuid
-    act_as_system_user do
-      assert_raises ActiveRecord::RecordInvalid do
-        # Cannot create because :trashed_on_next_sweep is already trashed
-        Link.create!(head_uuid: uuid,
-                     tail_uuid: system_user_uuid,
-                     link_class: 'whatever',
-                     name: 'something')
-      end
-
-      # Bump trash_at to now + 1 minute
-      Collection.where(uuid: uuid).
-        update(trash_at: db_current_time + (1).minute)
-
-      # Not considered trashed now
-      Link.create!(head_uuid: uuid,
-                   tail_uuid: system_user_uuid,
-                   link_class: 'whatever',
-                   name: 'something')
-    end
-    past = db_current_time
-    Collection.where(uuid: uuid).
-      update_all(is_trashed: true, trash_at: past, delete_at: past)
-    assert_not_empty Collection.where(uuid: uuid)
-    SweepTrashedObjects.sweep_now
-    assert_empty Collection.where(uuid: uuid)
-  end
-
   test "empty names are exempt from name uniqueness" do
     act_as_user users(:active) do
       c1 = Collection.new(name: nil, manifest_text: '', owner_uuid: groups(:aproject).uuid)
diff --git a/services/api/test/unit/group_test.rb b/services/api/test/unit/group_test.rb
index 017916f48..10932e116 100644
--- a/services/api/test/unit/group_test.rb
+++ b/services/api/test/unit/group_test.rb
@@ -228,50 +228,6 @@ class GroupTest < ActiveSupport::TestCase
     assert User.readable_by(users(:admin)).where(uuid:  u_bar.uuid).any?
   end
 
-  test "move projects to trash in SweepTrashedObjects" do
-    p = groups(:trashed_on_next_sweep)
-    assert_empty Group.where('uuid=? and is_trashed=true', p.uuid)
-    SweepTrashedObjects.sweep_now
-    assert_not_empty Group.where('uuid=? and is_trashed=true', p.uuid)
-  end
-
-  test "delete projects and their contents in SweepTrashedObjects" do
-    g_foo = groups(:trashed_project)
-    g_bar = groups(:trashed_subproject)
-    g_baz = groups(:trashed_subproject3)
-    col = collections(:collection_in_trashed_subproject)
-    job = jobs(:job_in_trashed_project)
-    cr = container_requests(:cr_in_trashed_project)
-    # Save how many objects were before the sweep
-    user_nr_was = User.all.length
-    coll_nr_was = Collection.all.length
-    group_nr_was = Group.where('group_class<>?', 'project').length
-    project_nr_was = Group.where(group_class: 'project').length
-    cr_nr_was = ContainerRequest.all.length
-    job_nr_was = Job.all.length
-    assert_not_empty Group.where(uuid: g_foo.uuid)
-    assert_not_empty Group.where(uuid: g_bar.uuid)
-    assert_not_empty Group.where(uuid: g_baz.uuid)
-    assert_not_empty Collection.where(uuid: col.uuid)
-    assert_not_empty Job.where(uuid: job.uuid)
-    assert_not_empty ContainerRequest.where(uuid: cr.uuid)
-    SweepTrashedObjects.sweep_now
-    assert_empty Group.where(uuid: g_foo.uuid)
-    assert_empty Group.where(uuid: g_bar.uuid)
-    assert_empty Group.where(uuid: g_baz.uuid)
-    assert_empty Collection.where(uuid: col.uuid)
-    assert_empty Job.where(uuid: job.uuid)
-    assert_empty ContainerRequest.where(uuid: cr.uuid)
-    # No unwanted deletions should have happened
-    assert_equal user_nr_was, User.all.length
-    assert_equal coll_nr_was-2,        # collection_in_trashed_subproject
-                 Collection.all.length # & deleted_on_next_sweep collections
-    assert_equal group_nr_was, Group.where('group_class<>?', 'project').length
-    assert_equal project_nr_was-3, Group.where(group_class: 'project').length
-    assert_equal cr_nr_was-1, ContainerRequest.all.length
-    assert_equal job_nr_was-1, Job.all.length
-  end
-
   test "project names must be displayable in a filesystem" do
     set_user_from_auth :active
     ["", "{SOLIDUS}"].each do |subst|

commit ba3dfca2da03a57a5f732dd6fb7bbaf744add9a5
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Nov 18 20:15:07 2021 -0500

    Using curl against https requires the ca-certificates package, update
    the docs accordingly. Also remove the 'silent' flag from curl so that
    errors are shown.
    
    No issue #
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/_includes/_install_debian_key.liquid b/doc/_includes/_install_debian_key.liquid
index b25674c8c..91b24a8a8 100644
--- a/doc/_includes/_install_debian_key.liquid
+++ b/doc/_includes/_install_debian_key.liquid
@@ -5,8 +5,8 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 <notextile>
-<pre><code># <span class="userinput">apt-get --no-install-recommends install curl gnupg2</span>
-# <span class="userinput">curl -s https://apt.arvados.org/pubkey.gpg -o /etc/apt/trusted.gpg.d/arvados.asc</span>
+<pre><code># <span class="userinput">apt-get --no-install-recommends install curl gnupg2 ca-certificates</span>
+# <span class="userinput">curl https://apt.arvados.org/pubkey.gpg -o /etc/apt/trusted.gpg.d/arvados.asc</span>
 </code></pre>
 </notextile>
 

commit 2b70210eedf5ed0cec12a904d04e4c3b33def073
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Thu Nov 18 14:38:46 2021 -0500

    Upgrade notes are for 2.3.1 refs #18361
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index c1a7ae87d..d3329f783 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -35,21 +35,14 @@ TODO: extract this information based on git commit messages and generate changel
 <div class="releasenotes">
 </notextile>
 
-h2(#main). development main (as of 2021-11-10)
+h2(#v2_3_1). v2.3.1 (2021-11-19)
 
-"previous: Upgrading from 2.3.0":#v2_3_0
+"previous: Upgrading to 2.3.0":#v2_3_0
 
 h3. Users are visible to other users by default
 
 When a new user is set up (either via @AutoSetupNewUsers@ config or via Workbench admin interface) the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@.
 
-h3. Dedicated keepstore process for each container
-
-When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.
-* If you have containers that can effectively handle multiple I/O threads per VCPU, consider increasing the @Containers.LocalKeepBlobBuffersPerVCPU@ value.
-* If you already have a robust permanent keepstore infrastructure, you can set @Containers.LocalKeepBlobBuffersPerVCPU@ to 0 to disable this feature and preserve the previous behavior of sending container I/O traffic to your separately provisioned keepstore servers.
-* This feature is enabled only if no volumes use @AccessViaHosts@, and no volumes have underlying @Replication@ less than @Collections.DefaultReplication@. If the feature is configured but cannot be enabled due to an incompatible volume configuration, this will be noted in the @crunch-run.txt@ file in the container log.
-
 h3. Backend support for vocabulary checking
 
 If your installation uses the vocabulary feature on Workbench2, you will need to update the cluster configuration by moving the vocabulary definition file to the node where @controller@ runs, and set the @API.VocabularyPath@ configuration parameter to the local path where the file was placed.

commit 949b55d6419e0de7c2386278cb17d11b3beb3b20
Author: Tom Clegg <tom at curii.com>
Date:   Thu Oct 28 10:28:44 2021 -0400

    Fix up "upgrading to {earlier version}" links.
    
    No issue #
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index dfee7e0b5..c1a7ae87d 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -58,7 +58,7 @@ You can read more about how this feature works on the "admin page":{{site.baseur
 
 h2(#v2_3_0). v2.3.0 (2021-10-27)
 
-"previous: Upgrading from 2.2.0":#v2_2_0
+"previous: Upgrading to 2.2.0":#v2_2_0
 
 h3. Ubuntu 18.04 packages for arvados-api-server and arvados-workbench now conflict with ruby-bundler
 
@@ -90,7 +90,7 @@ Typically a docker image collection contains a single @.tar@ file at the top lev
 
 h2(#v2_2_0). v2.2.0 (2021-06-03)
 
-"previous: Upgrading from 2.1.0":#v2_1_0
+"previous: Upgrading to 2.1.0":#v2_1_0
 
 h3. New spelling of S3 credential configs
 
@@ -125,7 +125,7 @@ The ForceLegacyAPI14 configuration option has been removed. In the unlikely even
 
 h2(#v2_1_0). v2.1.0 (2020-10-13)
 
-"Upgrading from 2.0.0":#v2_0_0
+"previous: Upgrading to 2.0.0":#v2_0_0
 
 h3. LoginCluster conflicts with other Login providers
 
@@ -212,7 +212,7 @@ As a side effect of new permission system constraints, "star" links (indicating
 
 h2(#v2_0_0). v2.0.0 (2020-02-07)
 
-"Upgrading from 1.4":#v1_4_1
+"previous: Upgrading to 1.4.1":#v1_4_1
 
 Arvados 2.0 is a major upgrade, with many changes.  Please read these upgrade notes carefully before you begin.
 
@@ -340,7 +340,7 @@ The API server accepts both PUT and PATCH for updates, but they will be normaliz
 
 h2(#v1_4_1). v1.4.1 (2019-09-20)
 
-"Upgrading from 1.4.0":#v1_4_0
+"previous: Upgrading to 1.4.0":#v1_4_0
 
 h3. Centos7 Python 3 dependency upgraded to rh-python36
 
@@ -348,7 +348,7 @@ The Python 3 dependency for Centos7 Arvados packages was upgraded from rh-python
 
 h2(#v1_4_0). v1.4.0 (2019-06-05)
 
-"Upgrading from 1.3.3":#v1_3_3
+"previous: Upgrading to 1.3.3":#v1_3_3
 
 h3. Populating the new file_count and file_size_total columns on the collections table
 
@@ -455,7 +455,7 @@ Arvados is migrating to a centralized configuration file for all components.  Du
 
 h2(#v1_3_3). v1.3.3 (2019-05-14)
 
-"Upgrading from 1.3.0":#v1_3_0
+"previous: Upgrading to 1.3.0":#v1_3_0
 
 This release corrects a potential data loss issue, if you are running Arvados 1.3.0 or 1.3.1 we strongly recommended disabling @keep-balance@ until you can upgrade to 1.3.3 or 1.4.0. With keep-balance disabled, there is no chance of data loss.
 
@@ -463,7 +463,7 @@ We've put together a "wiki page":https://dev.arvados.org/projects/arvados/wiki/R
 
 h2(#v1_3_0). v1.3.0 (2018-12-05)
 
-"Upgrading from 1.2":#v1_2_0
+"previous: Upgrading to 1.2":#v1_2_0
 
 This release includes several database migrations, which will be executed automatically as part of the API server upgrade. On large Arvados installations, these migrations will take a while. We've seen the upgrade take 30 minutes or more on installations with a lot of collections.
 
@@ -477,7 +477,7 @@ There are no special upgrade notes for this release.
 
 h2(#v1_2_0). v1.2.0 (2018-09-05)
 
-"Upgrading from 1.1.2 or 1.1.3":#v1_1_2
+"previous: Upgrading to 1.1.2 or 1.1.3":#v1_1_2
 
 h3. Regenerate Postgres table statistics
 
@@ -509,7 +509,7 @@ Verify your setup by confirming that API calls appear in the controller's logs (
 
 h2(#v1_1_4). v1.1.4 (2018-04-10)
 
-"Upgrading from 1.1.3":#v1_1_3
+"previous: Upgrading to 1.1.3":#v1_1_3
 
 h3. arvados-cwl-runner regressions (2018-04-05)
 
@@ -642,7 +642,7 @@ There are no special upgrade notes for this release.
 
 h2(#v1_1_2). v1.1.2 (2017-12-22)
 
-"Upgrading from 1.1.0 or 1.1.1":#v1_1_0
+"previous: Upgrading to 1.1.0 or 1.1.1":#v1_1_0
 
 h3. The minimum version for Postgres is now 9.4 (2017-12-08)
 

commit cd034042a1ea9950ebea9109ce857eee72adc249
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Wed Nov 17 17:03:57 2021 -0500

    Merge branch '17962-check-wf-definition' refs #17962
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/sdk/python/arvados/commands/arv_copy.py b/sdk/python/arvados/commands/arv_copy.py
index d10234ca6..7951842ac 100755
--- a/sdk/python/arvados/commands/arv_copy.py
+++ b/sdk/python/arvados/commands/arv_copy.py
@@ -113,7 +113,7 @@ def main():
     copy_opts.set_defaults(recursive=True)
 
     parser = argparse.ArgumentParser(
-        description='Copy a workflow, collection or project from one Arvados instance to another.',
+        description='Copy a workflow, collection or project from one Arvados instance to another.  On success, the uuid of the copied object is printed to stdout.',
         parents=[copy_opts, arv_cmd.retry_opt])
     args = parser.parse_args()
 
@@ -161,7 +161,12 @@ def main():
         logger.error("API server returned an error result: {}".format(result))
         exit(1)
 
-    logger.info("")
+    print(result['uuid'])
+
+    if result.get('partial_error'):
+        logger.warning("Warning: created copy with uuid {} but failed to copy some items: {}".format(result['uuid'], result['partial_error']))
+        exit(1)
+
     logger.info("Success: created copy with uuid {}".format(result['uuid']))
     exit(0)
 
@@ -292,8 +297,11 @@ def copy_workflow(wf_uuid, src, dst, args):
     # fetch the workflow from the source instance
     wf = src.workflows().get(uuid=wf_uuid).execute(num_retries=args.retries)
 
+    if not wf["definition"]:
+        logger.warning("Workflow object {} has an empty or null definition, it won't do anything.".format(wf_uuid))
+
     # copy collections and docker images
-    if args.recursive:
+    if args.recursive and wf["definition"]:
         wf_def = yaml.safe_load(wf["definition"])
         if wf_def is not None:
             locations = []
@@ -683,17 +691,31 @@ def copy_project(obj_uuid, src, dst, owner_uuid, args):
 
     logger.debug('Copying %s to %s', obj_uuid, project_record["uuid"])
 
+
+    partial_error = ""
+
     # Copy collections
-    copy_collections([col["uuid"] for col in arvados.util.list_all(src.collections().list, filters=[["owner_uuid", "=", obj_uuid]])],
-                     src, dst, args)
+    try:
+        copy_collections([col["uuid"] for col in arvados.util.list_all(src.collections().list, filters=[["owner_uuid", "=", obj_uuid]])],
+                         src, dst, args)
+    except Exception as e:
+        partial_error += "\n" + str(e)
 
     # Copy workflows
     for w in arvados.util.list_all(src.workflows().list, filters=[["owner_uuid", "=", obj_uuid]]):
-        copy_workflow(w["uuid"], src, dst, args)
+        try:
+            copy_workflow(w["uuid"], src, dst, args)
+        except Exception as e:
+            partial_error += "\n" + "Error while copying %s: %s" % (w["uuid"], e)
 
     if args.recursive:
         for g in arvados.util.list_all(src.groups().list, filters=[["owner_uuid", "=", obj_uuid]]):
-            copy_project(g["uuid"], src, dst, project_record["uuid"], args)
+            try:
+                copy_project(g["uuid"], src, dst, project_record["uuid"], args)
+            except Exception as e:
+                partial_error += "\n" + "Error while copying %s: %s" % (g["uuid"], e)
+
+    project_record["partial_error"] = partial_error
 
     return project_record
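
With this change arv-copy prints the destination object's uuid on stdout and exits nonzero when some contained items could not be copied. A hedged sketch of how a caller might consume that behavior (the uuids below are placeholders):

    # Run arv-copy as a subprocess and pick up the copied object's uuid.
    import subprocess

    result = subprocess.run(
        ["arv-copy", "--project-uuid", "zzzzz-j7d0g-xxxxxxxxxxxxxxx",
         "zzzzz-j7d0g-yyyyyyyyyyyyyyy"],
        capture_output=True, text=True)
    copied_uuid = result.stdout.strip()
    if result.returncode == 0:
        print("copy created:", copied_uuid)
    elif copied_uuid:
        # Nonzero exit with a uuid on stdout: the copy exists, but some items
        # inside it were skipped (see partial_error above).
        print("partial copy created:", copied_uuid)
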
 
diff --git a/sdk/python/tests/test_arv_copy.py b/sdk/python/tests/test_arv_copy.py
index b560018d3..b853b3304 100644
--- a/sdk/python/tests/test_arv_copy.py
+++ b/sdk/python/tests/test_arv_copy.py
@@ -61,10 +61,13 @@ class ArvCopyVersionTestCase(run_test_server.TestCaseWithServers, tutil.VersionC
             contents = api.groups().list(filters=[["owner_uuid", "=", dest_proj]]).execute()
             assert len(contents["items"]) == 0
 
-            try:
-                self.run_copy(["--project-uuid", dest_proj, "--storage-classes", "foo", src_proj])
-            except SystemExit as e:
-                assert e.code == 0
+            with tutil.redirected_streams(
+                    stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):
+                try:
+                    self.run_copy(["--project-uuid", dest_proj, "--storage-classes", "foo", src_proj])
+                except SystemExit as e:
+                    assert e.code == 0
+                copy_uuid_from_stdout = out.getvalue().strip()
 
             contents = api.groups().list(filters=[["owner_uuid", "=", dest_proj]]).execute()
             assert len(contents["items"]) == 1
@@ -72,6 +75,8 @@ class ArvCopyVersionTestCase(run_test_server.TestCaseWithServers, tutil.VersionC
             assert contents["items"][0]["name"] == "arv-copy project"
             copied_project = contents["items"][0]["uuid"]
 
+            assert copied_project == copy_uuid_from_stdout
+
             contents = api.collections().list(filters=[["owner_uuid", "=", copied_project]]).execute()
             assert len(contents["items"]) == 1
 

commit bbc934a55d42bcd46ad0a7d33456b37c0be18f61
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Wed Nov 17 12:55:16 2021 -0500

    Merge branch '18285-cwl-hint-warning' refs #18285
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/sdk/cwl/setup.py b/sdk/cwl/setup.py
index e39fdd8d9..f034ca5ab 100644
--- a/sdk/cwl/setup.py
+++ b/sdk/cwl/setup.py
@@ -39,8 +39,8 @@ setup(name='arvados-cwl-runner',
       # file to determine what version of cwltool and schema-salad to
       # build.
       install_requires=[
-          'cwltool==3.1.20211020155521',
-          'schema-salad==8.2.20211020114435',
+          'cwltool==3.1.20211107152837',
+          'schema-salad==8.2.20211116214159',
           'arvados-python-client{}'.format(pysdk_dep),
           'setuptools',
           'ciso8601 >= 2.0.0',

commit c73a78ead6b493df1f4b44cd1e1a43d6c268f6fa
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Wed Nov 17 15:28:37 2021 -0300

    Merge branch '18363-managed-properties-doc-improvement' into main.
    Closes #18363
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/doc/admin/collection-managed-properties.html.textile.liquid b/doc/admin/collection-managed-properties.html.textile.liquid
index 395200126..341030c41 100644
--- a/doc/admin/collection-managed-properties.html.textile.liquid
+++ b/doc/admin/collection-managed-properties.html.textile.liquid
@@ -41,13 +41,23 @@ h4. Protected properties
 
 If there's a need to prevent a non-admin user from modifying a specific property, even by its owner, the @Protected@ attribute can be set to @true@, like so:
 
+<pre>
+Collections:
+  ManagedProperties:
+    sample_id: {Protected: true}
+</pre>
+
+This configuration won't assign a @sample_id@ property on collection creation, but if the user adds it to any collection, its value is protected from that point on.
+
+Another use case would be to protect properties that were automatically assigned by the system:
+
 <pre>
 Collections:
   ManagedProperties:
     responsible_person_uuid: {Function: original_owner, Protected: true}
 </pre>
 
-This property can be applied to any of the defined managed properties. If missing, it's assumed as being @false@ by default.
+If missing, the @Protected@ attribute is assumed to be @false@ by default.
 
 h3. Supporting example scripts
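
As a rough illustration of the @Protected@ behavior described above from the Python SDK side (a sketch only; it assumes the @sample_id: {Protected: true}@ configuration shown earlier and uses placeholder values):

    import arvados

    api = arvados.api('v1')
    coll = api.collections().create(body={"collection": {
        "name": "sample data",
        "properties": {"sample_id": "S-1234"},
    }}).execute()

    # As a non-admin, a later attempt to change or drop sample_id is expected
    # to be rejected by the API server; admins can still update it.
    api.collections().update(uuid=coll["uuid"], body={"collection": {
        "properties": {"sample_id": "S-9999"},
    }}).execute()
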
 

commit 6a7233ad1f3afc8b128c647810d38ad9cd158f69
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Wed Nov 17 12:03:42 2021 -0300

    Merge branch '18340-delete-role-filter-groups' into main. Closes #18340.
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
index 8d15bb1c5..7fbb86c01 100644
--- a/services/api/app/controllers/arvados/v1/groups_controller.rb
+++ b/services/api/app/controllers/arvados/v1/groups_controller.rb
@@ -10,6 +10,8 @@ class Arvados::V1::GroupsController < ApplicationController
   skip_before_action :find_object_by_uuid, only: :shared
   skip_before_action :render_404_if_no_object, only: :shared
 
+  TRASHABLE_CLASSES = ['project']
+
   def self._index_requires_parameters
     (super rescue {}).
       merge({
@@ -99,6 +101,15 @@ class Arvados::V1::GroupsController < ApplicationController
     end
   end
 
+  def destroy
+    if !TRASHABLE_CLASSES.include?(@object.group_class)
+      @object.destroy
+      show
+    else
+      super # Calls destroy from TrashableController module
+    end
+  end
+
   def render_404_if_no_object
     if params[:action] == 'contents'
       if !params[:uuid]
@@ -351,8 +362,6 @@ class Arvados::V1::GroupsController < ApplicationController
     @offset = offset_all
   end
 
-  protected
-
   def exclude_home objectlist, klass
     # select records that are readable by current user AND
     #   the owner_uuid is a user (but not the current user) OR
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
index 02a4ce966..4dbccc5eb 100644
--- a/services/api/test/functional/arvados/v1/groups_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/groups_controller_test.rb
@@ -538,6 +538,21 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
     assert_includes(owners, groups(:asubproject).uuid)
   end
 
+  [:afiltergroup, :private_role].each do |grp|
+    test "delete non-project group #{grp}" do
+      authorize_with :admin
+      assert_not_nil Group.find_by_uuid(groups(grp).uuid)
+      assert !Group.find_by_uuid(groups(grp).uuid).is_trashed
+      post :destroy, params: {
+            id: groups(grp).uuid,
+            format: :json,
+          }
+      assert_response :success
+      # Should not be trashed
+      assert_nil Group.find_by_uuid(groups(grp).uuid)
+    end
+  end
+
   ### trashed project tests ###
 
   #
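
The practical effect for API clients: deleting a role or filter group now removes the record outright (it cannot be trashed), while projects keep the trash/untrash behavior. A small illustrative sketch with a placeholder uuid:

    import arvados

    api = arvados.api('v1')
    # For a group whose group_class is 'filter' or 'role', delete is now a
    # hard delete; a later get returns 404 instead of a record with
    # is_trashed=true.
    api.groups().delete(uuid="zzzzz-j7d0g-xxxxxxxxxxxxxxx").execute()
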

commit 59240220e48bcf508daebcf980c1e2db20ccc0e7
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Wed Nov 17 12:16:54 2021 -0300

    Merge branch '18336-httplib2-pysdk-issues' into main. Closes #18336
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/sdk/python/setup.py b/sdk/python/setup.py
index 8d637303b..f82d44ab6 100644
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -50,7 +50,7 @@ setup(name='arvados-python-client',
           'future',
           'google-api-python-client >=1.6.2, <2',
           'google-auth<2',
-          'httplib2 >=0.9.2',
+          'httplib2 >=0.9.2, <0.20.2',
           'pycurl >=7.19.5.1',
           'ruamel.yaml >=0.15.54, <0.17.11',
           'setuptools',

commit b9b43736e711f10fcf9c031bafba2464bb2ce386
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Fri Nov 12 09:38:05 2021 -0500

    Merge branch '18346-crunchrun-no-events' refs #18346
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/lib/crunchrun/crunchrun.go b/lib/crunchrun/crunchrun.go
index 3036d5555..10e5193a8 100644
--- a/lib/crunchrun/crunchrun.go
+++ b/lib/crunchrun/crunchrun.go
@@ -605,10 +605,15 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
 	}
 
 	if pdhOnly {
-		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id")
+		// If we are only mounting collections by pdh, make
+		// sure we don't subscribe to websocket events to
+		// avoid putting undesired load on the API server
+		arvMountCmd = append(arvMountCmd, "--mount-by-pdh", "by_id", "--disable-event-listening")
 	} else {
 		arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_id")
 	}
+	// the by_uuid mount point is used by singularity when writing
+	// out docker images converted to SIF
 	arvMountCmd = append(arvMountCmd, "--mount-by-id", "by_uuid")
 	arvMountCmd = append(arvMountCmd, runner.ArvMountPoint)
 
diff --git a/lib/crunchrun/crunchrun_test.go b/lib/crunchrun/crunchrun_test.go
index 4c5f517b1..c28cf73cb 100644
--- a/lib/crunchrun/crunchrun_test.go
+++ b/lib/crunchrun/crunchrun_test.go
@@ -1126,7 +1126,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		c.Check(err, IsNil)
 		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+			"--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}})
 		os.RemoveAll(cr.ArvMountPoint)
 		cr.CleanupDirs()
@@ -1146,7 +1146,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		c.Check(err, IsNil)
 		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "foo,bar", "--crunchstat-interval=5",
-			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+			"--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/out": {realTemp + "/tmp2", false}, "/tmp": {realTemp + "/tmp3", false}})
 		os.RemoveAll(cr.ArvMountPoint)
 		cr.CleanupDirs()
@@ -1166,7 +1166,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		c.Check(err, IsNil)
 		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+			"--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}, "/etc/arvados/ca-certificates.crt": {stubCertPath, true}})
 		os.RemoveAll(cr.ArvMountPoint)
 		cr.CleanupDirs()
@@ -1189,7 +1189,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		c.Check(err, IsNil)
 		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/keeptmp": {realTemp + "/keep1/tmp0", false}})
 		os.RemoveAll(cr.ArvMountPoint)
 		cr.CleanupDirs()
@@ -1212,7 +1212,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		c.Check(err, IsNil)
 		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{
 			"/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
 			"/keepout": {realTemp + "/keep1/tmp0", false},
@@ -1239,7 +1239,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		c.Check(err, IsNil)
 		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{
 			"/keepinp": {realTemp + "/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53", true},
 			"/keepout": {realTemp + "/keep1/tmp0", false},
@@ -1322,7 +1322,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		c.Check(err, IsNil)
 		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
-			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
+			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--disable-event-listening", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{
 			"/tmp":     {realTemp + "/tmp2", false},
 			"/tmp/foo": {realTemp + "/keep1/tmp0", true},

commit ca0dd0691c1d5053794681bbfb063926e49c039a
Author: Tom Clegg <tom at curii.com>
Date:   Tue Nov 16 16:34:45 2021 -0500

    Merge branch '18376-nfs-readdirent'
    
    fixes #18376
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index f076ccf18..46f4db409 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -359,47 +359,53 @@ var blockFileRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
 //     e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
 //
 func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
-	var lastErr error
 	rootdir, err := v.os.Open(v.Root)
 	if err != nil {
 		return err
 	}
-	defer rootdir.Close()
 	v.os.stats.TickOps("readdir")
 	v.os.stats.Tick(&v.os.stats.ReaddirOps)
-	for {
-		names, err := rootdir.Readdirnames(1)
-		if err == io.EOF {
-			return lastErr
-		} else if err != nil {
-			return err
-		}
-		if !strings.HasPrefix(names[0], prefix) && !strings.HasPrefix(prefix, names[0]) {
+	subdirs, err := rootdir.Readdirnames(-1)
+	rootdir.Close()
+	if err != nil {
+		return err
+	}
+	for _, subdir := range subdirs {
+		if !strings.HasPrefix(subdir, prefix) && !strings.HasPrefix(prefix, subdir) {
 			// prefix excludes all blocks stored in this dir
 			continue
 		}
-		if !blockDirRe.MatchString(names[0]) {
+		if !blockDirRe.MatchString(subdir) {
 			continue
 		}
-		blockdirpath := filepath.Join(v.Root, names[0])
+		blockdirpath := filepath.Join(v.Root, subdir)
 		blockdir, err := v.os.Open(blockdirpath)
 		if err != nil {
 			v.logger.WithError(err).Errorf("error reading %q", blockdirpath)
-			lastErr = fmt.Errorf("error reading %q: %s", blockdirpath, err)
-			continue
+			return fmt.Errorf("error reading %q: %s", blockdirpath, err)
 		}
 		v.os.stats.TickOps("readdir")
 		v.os.stats.Tick(&v.os.stats.ReaddirOps)
-		for {
-			fileInfo, err := blockdir.Readdir(1)
-			if err == io.EOF {
-				break
+		// ReadDir() (compared to Readdir(), which returns
+		// FileInfo structs) helps complete the sequence of
+		// readdirent calls as quickly as possible, reducing
+		// the likelihood of NFS EBADCOOKIE (523) errors.
+		dirents, err := blockdir.ReadDir(-1)
+		blockdir.Close()
+		if err != nil {
+			v.logger.WithError(err).Errorf("error reading %q", blockdirpath)
+			return fmt.Errorf("error reading %q: %s", blockdirpath, err)
+		}
+		for _, dirent := range dirents {
+			fileInfo, err := dirent.Info()
+			if os.IsNotExist(err) {
+				// File disappeared between ReadDir() and now
+				continue
 			} else if err != nil {
-				v.logger.WithError(err).Errorf("error reading %q", blockdirpath)
-				lastErr = fmt.Errorf("error reading %q: %s", blockdirpath, err)
-				break
+				v.logger.WithError(err).Errorf("error getting FileInfo for %q in %q", dirent.Name(), blockdirpath)
+				return err
 			}
-			name := fileInfo[0].Name()
+			name := fileInfo.Name()
 			if !strings.HasPrefix(name, prefix) {
 				continue
 			}
@@ -408,16 +414,15 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
 			}
 			_, err = fmt.Fprint(w,
 				name,
-				"+", fileInfo[0].Size(),
-				" ", fileInfo[0].ModTime().UnixNano(),
+				"+", fileInfo.Size(),
+				" ", fileInfo.ModTime().UnixNano(),
 				"\n")
 			if err != nil {
-				blockdir.Close()
 				return fmt.Errorf("error writing: %s", err)
 			}
 		}
-		blockdir.Close()
 	}
+	return nil
 }
 
 // Trash trashes the block data from the unix storage

commit 595af530fb6a19152421af0f7134953bb366f668
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Tue Nov 16 15:48:47 2021 -0300

    Merge branch '17635-pysdk-collection-preserve-version' into main. Closes #17635
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/doc/user/topics/collection-versioning.html.textile.liquid b/doc/user/topics/collection-versioning.html.textile.liquid
index 9a32de0d0..d6a3bb4c1 100644
--- a/doc/user/topics/collection-versioning.html.textile.liquid
+++ b/doc/user/topics/collection-versioning.html.textile.liquid
@@ -18,7 +18,7 @@ A version will be saved when one of the following conditions is true:
 
 One is by "configuring (system-wide) the collection's idle time":{{site.baseurl}}/admin/collection-versioning.html. This idle time is checked against the @modified_at@ attribute so that the version is saved when one or more of the previously enumerated attributes get updated and the @modified_at@ is at least at the configured idle time in the past. This way, a frequently updated collection won't create lots of version records that may not be useful.
 
-The other way to trigger a version save, is by setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time it gets updated.
+The other way to trigger a version save, is by setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time it gets updated. This includes either creating a new collection or updating a preexisting one. In the case of using @preserve_version = true@ on a collection's create call, the new record state will be preserved as a snapshot on the next update.
 
 h3. Collection's past versions behavior & limitations
 
diff --git a/lib/config/export.go b/lib/config/export.go
index 1d2ea6c98..b413bcd75 100644
--- a/lib/config/export.go
+++ b/lib/config/export.go
@@ -96,7 +96,7 @@ var whitelist = map[string]bool{
 	"Collections.BlobTrashCheckInterval":                  false,
 	"Collections.BlobTrashConcurrency":                    false,
 	"Collections.BlobTrashLifetime":                       false,
-	"Collections.CollectionVersioning":                    false,
+	"Collections.CollectionVersioning":                    true,
 	"Collections.DefaultReplication":                      true,
 	"Collections.DefaultTrashLifetime":                    true,
 	"Collections.ForwardSlashNameSubstitution":            true,
diff --git a/sdk/python/arvados/collection.py b/sdk/python/arvados/collection.py
index d03265ca4..55be40fa0 100644
--- a/sdk/python/arvados/collection.py
+++ b/sdk/python/arvados/collection.py
@@ -1546,7 +1546,8 @@ class Collection(RichCollectionBase):
              storage_classes=None,
              trash_at=None,
              merge=True,
-             num_retries=None):
+             num_retries=None,
+             preserve_version=False):
         """Save collection to an existing collection record.
 
         Commit pending buffer blocks to Keep, merge with remote record (if
@@ -1576,6 +1577,13 @@ class Collection(RichCollectionBase):
         :num_retries:
           Retry count on API calls (if None,  use the collection default)
 
+        :preserve_version:
+          If True, indicate that the collection content being saved right now
+          should be preserved in a version snapshot if the collection record is
+          updated in the future. Requires that the API server has
+          Collections.CollectionVersioning enabled, if not, setting this will
+          raise an exception.
+
         """
         if properties and type(properties) is not dict:
             raise errors.ArgumentError("properties must be dictionary type.")
@@ -1588,6 +1596,9 @@ class Collection(RichCollectionBase):
         if trash_at and type(trash_at) is not datetime.datetime:
             raise errors.ArgumentError("trash_at must be datetime type.")
 
+        if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):
+            raise errors.ArgumentError("preserve_version is not supported when CollectionVersioning is not enabled.")
+
         body={}
         if properties:
             body["properties"] = properties
@@ -1596,6 +1607,8 @@ class Collection(RichCollectionBase):
         if trash_at:
             t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
             body["trash_at"] = t
+        if preserve_version:
+            body["preserve_version"] = preserve_version
 
         if not self.committed():
             if self._has_remote_blocks:
@@ -1641,7 +1654,8 @@ class Collection(RichCollectionBase):
                  storage_classes=None,
                  trash_at=None,
                  ensure_unique_name=False,
-                 num_retries=None):
+                 num_retries=None,
+                 preserve_version=False):
         """Save collection to a new collection record.
 
         Commit pending buffer blocks to Keep and, when create_collection_record
@@ -1680,6 +1694,13 @@ class Collection(RichCollectionBase):
         :num_retries:
           Retry count on API calls (if None,  use the collection default)
 
+        :preserve_version:
+          If True, indicate that the collection content being saved right now
+          should be preserved in a version snapshot if the collection record is
+          updated in the future. Requires that the API server has
+          Collections.CollectionVersioning enabled; if not, setting this will
+          raise an exception.
+
         """
         if properties and type(properties) is not dict:
             raise errors.ArgumentError("properties must be dictionary type.")
@@ -1690,6 +1711,9 @@ class Collection(RichCollectionBase):
         if trash_at and type(trash_at) is not datetime.datetime:
             raise errors.ArgumentError("trash_at must be datetime type.")
 
+        if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):
+            raise errors.ArgumentError("preserve_version is not supported when CollectionVersioning is not enabled.")
+
         if self._has_remote_blocks:
             # Copy any remote blocks to the local cluster.
             self._copy_remote_blocks(remote_blocks={})
@@ -1718,6 +1742,8 @@ class Collection(RichCollectionBase):
             if trash_at:
                 t = trash_at.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
                 body["trash_at"] = t
+            if preserve_version:
+                body["preserve_version"] = preserve_version
 
             self._remember_api_response(self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries))
             text = self._api_response["manifest_text"]
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
index 6d2643a96..f91783250 100644
--- a/sdk/python/tests/run_test_server.py
+++ b/sdk/python/tests/run_test_server.py
@@ -791,6 +791,7 @@ def setup_config():
                     "UserProfileNotificationAddress": "arvados at example.com",
                 },
                 "Collections": {
+                    "CollectionVersioning": True,
                     "BlobSigningKey": "zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc",
                     "TrustAllContent": False,
                     "ForwardSlashNameSubstitution": "/",
diff --git a/sdk/python/tests/test_collections.py b/sdk/python/tests/test_collections.py
index f821ff952..a43e0d40d 100644
--- a/sdk/python/tests/test_collections.py
+++ b/sdk/python/tests/test_collections.py
@@ -1360,6 +1360,25 @@ class NewCollectionTestCaseWithServersAndTokens(run_test_server.TestCaseWithServ
 
 
 class NewCollectionTestCaseWithServers(run_test_server.TestCaseWithServers):
+    def test_preserve_version_on_save(self):
+        c = Collection()
+        c.save_new(preserve_version=True)
+        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+        self.assertEqual(coll_record['version'], 1)
+        self.assertEqual(coll_record['preserve_version'], True)
+        with c.open("foo.txt", "wb") as foo:
+            foo.write(b"foo")
+        c.save(preserve_version=True)
+        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+        self.assertEqual(coll_record['version'], 2)
+        self.assertEqual(coll_record['preserve_version'], True)
+        with c.open("bar.txt", "wb") as foo:
+            foo.write(b"bar")
+        c.save(preserve_version=False)
+        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()
+        self.assertEqual(coll_record['version'], 3)
+        self.assertEqual(coll_record['preserve_version'], False)
+
     def test_get_manifest_text_only_committed(self):
         c = Collection()
         with c.open("count.txt", "wb") as f:

commit 9e3e3bcd81a4fc80e1aaa33e7a1711a74099e0e4
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Mon Nov 15 15:12:19 2021 -0300

    Merge branch '18215-select-param-update-create' into main. Refs #18215
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/lib/controller/router/response.go b/lib/controller/router/response.go
index 03cdcf18d..01126bcb4 100644
--- a/lib/controller/router/response.go
+++ b/lib/controller/router/response.go
@@ -26,6 +26,10 @@ type responseOptions struct {
 func (rtr *router) responseOptions(opts interface{}) (responseOptions, error) {
 	var rOpts responseOptions
 	switch opts := opts.(type) {
+	case *arvados.CreateOptions:
+		rOpts.Select = opts.Select
+	case *arvados.UpdateOptions:
+		rOpts.Select = opts.Select
 	case *arvados.GetOptions:
 		rOpts.Select = opts.Select
 	case *arvados.ListOptions:
diff --git a/lib/controller/router/router_test.go b/lib/controller/router/router_test.go
index 722895645..ce440dac5 100644
--- a/lib/controller/router/router_test.go
+++ b/lib/controller/router/router_test.go
@@ -379,6 +379,7 @@ func (s *RouterIntegrationSuite) TestFullTimestampsInResponse(c *check.C) {
 func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
 	uuid := arvadostest.QueuedContainerUUID
 	token := arvadostest.ActiveTokenV2
+	// GET
 	for _, sel := range [][]string{
 		{"uuid", "command"},
 		{"uuid", "command", "uuid"},
@@ -395,6 +396,26 @@ func (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {
 		_, hasMounts := resp["mounts"]
 		c.Check(hasMounts, check.Equals, false)
 	}
+	// POST & PUT
+	uuid = arvadostest.FooCollection
+	j, err := json.Marshal([]string{"uuid", "description"})
+	c.Assert(err, check.IsNil)
+	for _, method := range []string{"PUT", "POST"} {
+		desc := "Today is " + time.Now().String()
+		reqBody := "{\"description\":\"" + desc + "\"}"
+		var resp map[string]interface{}
+		var rr *httptest.ResponseRecorder
+		if method == "PUT" {
+			_, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections/"+uuid+"?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+		} else {
+			_, rr, resp = doRequest(c, s.rtr, token, method, "/arvados/v1/collections?select="+string(j), nil, bytes.NewReader([]byte(reqBody)))
+		}
+		c.Check(rr.Code, check.Equals, http.StatusOK)
+		c.Check(resp["kind"], check.Equals, "arvados#collection")
+		c.Check(resp["uuid"], check.HasLen, 27)
+		c.Check(resp["description"], check.Equals, desc)
+		c.Check(resp["manifest_text"], check.IsNil)
+	}
 }
 
 func (s *RouterIntegrationSuite) TestHEAD(c *check.C) {

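To illustrate what the new @select@ handling on create/update enables (not shown in the patch itself): a client can now trim POST/PUT responses the same way it could already trim GET/list responses. A rough sketch using Python and the raw HTTP API, mirroring the test above; the @requests@ dependency and the host/token environment variables are assumptions:

    import json
    import os
    import requests

    base = "https://" + os.environ["ARVADOS_API_HOST"]
    headers = {"Authorization": "Bearer " + os.environ["ARVADOS_API_TOKEN"]}

    # Ask for only uuid and description in the response body; large fields
    # such as manifest_text are omitted.
    select = json.dumps(["uuid", "description"])
    resp = requests.post(
        base + "/arvados/v1/collections",
        params={"select": select},
        headers=headers,
        json={"description": "select demo"},
    )
    print(resp.json())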
commit bd8ee613953e8cbcbb572b648e87602397ba31bb
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Mon Nov 15 12:50:54 2021 -0500

    Merge branch '18316-fuse-read-only' refs #18316
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/services/fuse/arvados_fuse/command.py b/services/fuse/arvados_fuse/command.py
index 67a2aaa4d..5f0a1f80f 100644
--- a/services/fuse/arvados_fuse/command.py
+++ b/services/fuse/arvados_fuse/command.py
@@ -244,7 +244,7 @@ class Mount(object):
         usr = self.api.users().current().execute(num_retries=self.args.retries)
         now = time.time()
         dir_class = None
-        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries]
+        dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries, self.args.enable_write]
         mount_readme = False
 
         storage_classes = None
@@ -310,7 +310,7 @@ class Mount(object):
             return
 
         e = self.operations.inodes.add_entry(Directory(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api.config))
+            llfuse.ROOT_INODE, self.operations.inodes, self.api.config, self.args.enable_write))
         dir_args[0] = e.inode
 
         for name in self.args.mount_by_id:
diff --git a/services/fuse/arvados_fuse/fusedir.py b/services/fuse/arvados_fuse/fusedir.py
index d5a018ae8..a2e33c7b3 100644
--- a/services/fuse/arvados_fuse/fusedir.py
+++ b/services/fuse/arvados_fuse/fusedir.py
@@ -36,7 +36,7 @@ class Directory(FreshBase):
     and the value referencing a File or Directory object.
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig):
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write):
         """parent_inode is the integer inode number"""
 
         super(Directory, self).__init__()
@@ -49,6 +49,7 @@ class Directory(FreshBase):
         self.apiconfig = apiconfig
         self._entries = {}
         self._mtime = time.time()
+        self._enable_write = enable_write
 
     def forward_slash_subst(self):
         if not hasattr(self, '_fsns'):
@@ -269,8 +270,8 @@ class CollectionDirectoryBase(Directory):
 
     """
 
-    def __init__(self, parent_inode, inodes, apiconfig, collection):
-        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig)
+    def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection):
+        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
         self.apiconfig = apiconfig
         self.collection = collection
 
@@ -284,10 +285,10 @@ class CollectionDirectoryBase(Directory):
             item.fuse_entry.dead = False
             self._entries[name] = item.fuse_entry
         elif isinstance(item, arvados.collection.RichCollectionBase):
-            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, item))
+            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item))
             self._entries[name].populate(mtime)
         else:
-            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime))
+            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
         item.fuse_entry = self._entries[name]
 
     def on_event(self, event, collection, name, item):
@@ -348,28 +349,36 @@ class CollectionDirectoryBase(Directory):
                 self.new_entry(entry, item, self.mtime())
 
     def writable(self):
-        return self.collection.writable()
+        return self._enable_write and self.collection.writable()
 
     @use_counter
     def flush(self):
+        if not self.writable():
+            return
         with llfuse.lock_released:
             self.collection.root_collection().save()
 
     @use_counter
     @check_update
     def create(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.open(name, "w").close()
 
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.mkdirs(name)
 
     @use_counter
     @check_update
     def unlink(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
@@ -377,6 +386,8 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
         with llfuse.lock_released:
             self.collection.remove(name)
         self.flush()
@@ -384,6 +395,9 @@ class CollectionDirectoryBase(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, CollectionDirectoryBase):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -413,8 +427,8 @@ class CollectionDirectoryBase(Directory):
 class CollectionDirectory(CollectionDirectoryBase):
     """Represents the root of a directory tree representing a collection."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, collection_record=None, explicit_collection=None):
-        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, None)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
+        super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None)
         self.api = api
         self.num_retries = num_retries
         self.collection_record_file = None
@@ -434,14 +448,14 @@ class CollectionDirectory(CollectionDirectoryBase):
             self._mtime = 0
         self._manifest_size = 0
         if self.collection_locator:
-            self._writable = (uuid_pattern.match(self.collection_locator) is not None)
+            self._writable = (uuid_pattern.match(self.collection_locator) is not None) and enable_write
         self._updating_lock = threading.Lock()
 
     def same(self, i):
         return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator
 
     def writable(self):
-        return self.collection.writable() if self.collection is not None else self._writable
+        return self._enable_write and (self.collection.writable() if self.collection is not None else self._writable)
 
     def want_event_subscribe(self):
         return (uuid_pattern.match(self.collection_locator) is not None)
@@ -603,14 +617,16 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
         def save_new(self):
             pass
 
-    def __init__(self, parent_inode, inodes, api_client, num_retries, storage_classes=None):
+    def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, storage_classes=None):
         collection = self.UnsaveableCollection(
             api_client=api_client,
             keep_client=api_client.keep,
             num_retries=num_retries,
             storage_classes_desired=storage_classes)
+        # This is always enable_write=True because it never tries to
+        # save to the backend
         super(TmpCollectionDirectory, self).__init__(
-            parent_inode, inodes, api_client.config, collection)
+            parent_inode, inodes, api_client.config, True, collection)
         self.collection_record_file = None
         self.populate(self.mtime())
 
@@ -703,8 +719,8 @@ and the directory will appear if it exists.
 
 """.lstrip()
 
-    def __init__(self, parent_inode, inodes, api, num_retries, pdh_only=False, storage_classes=None):
-        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, pdh_only=False, storage_classes=None):
+        super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.pdh_only = pdh_only
@@ -720,7 +736,8 @@ and the directory will appear if it exists.
             # If we're the root directory, add an identical by_id subdirectory.
             if self.inode == llfuse.ROOT_INODE:
                 self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, self.pdh_only))
+                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                    self.pdh_only))
 
     def __contains__(self, k):
         if k in self._entries:
@@ -738,11 +755,11 @@ and the directory will appear if it exists.
                 if project[u'items_available'] == 0:
                     return False
                 e = self.inodes.add_entry(ProjectDirectory(
-                    self.inode, self.inodes, self.api, self.num_retries,
+                    self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
                     project[u'items'][0], storage_classes=self.storage_classes))
             else:
                 e = self.inodes.add_entry(CollectionDirectory(
-                        self.inode, self.inodes, self.api, self.num_retries, k))
+                        self.inode, self.inodes, self.api, self.num_retries, self._enable_write, k))
 
             if e.update():
                 if k not in self._entries:
@@ -776,8 +793,8 @@ and the directory will appear if it exists.
 class TagsDirectory(Directory):
     """A special directory that contains as subdirectories all tags visible to the user."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, poll_time=60):
-        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config)
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, poll_time=60):
+        super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self._poll = True
@@ -798,7 +815,8 @@ class TagsDirectory(Directory):
             self.merge(tags['items']+[{"name": n} for n in self._extra],
                        lambda i: i['name'],
                        lambda a, i: a.tag == i['name'],
-                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, i['name'], poll=self._poll, poll_time=self._poll_time))
+                       lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                              i['name'], poll=self._poll, poll_time=self._poll_time))
 
     @use_counter
     @check_update
@@ -832,9 +850,9 @@ class TagDirectory(Directory):
     to the user that are tagged with a particular tag.
     """
 
-    def __init__(self, parent_inode, inodes, api, num_retries, tag,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, tag,
                  poll=False, poll_time=60):
-        super(TagDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.tag = tag
@@ -856,15 +874,15 @@ class TagDirectory(Directory):
         self.merge(taggedcollections['items'],
                    lambda i: i['head_uuid'],
                    lambda a, i: a.collection_locator == i['head_uuid'],
-                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid']))
+                   lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid']))
 
 
 class ProjectDirectory(Directory):
     """A special directory that contains the contents of a project."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, project_object,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, project_object,
                  poll=True, poll_time=3, storage_classes=None):
-        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.project_object = project_object
@@ -882,12 +900,13 @@ class ProjectDirectory(Directory):
 
     def createDirectory(self, i):
         if collection_uuid_pattern.match(i['uuid']):
-            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i)
+            return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i)
         elif group_uuid_pattern.match(i['uuid']):
-            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i, self._poll, self._poll_time, self.storage_classes)
+            return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                    i, self._poll, self._poll_time, self.storage_classes)
         elif link_uuid_pattern.match(i['uuid']):
             if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
-                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, i['head_uuid'])
+                return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid'])
             else:
                 return None
         elif uuid_pattern.match(i['uuid']):
@@ -1022,6 +1041,8 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def writable(self):
+        if not self._enable_write:
+            return False
         with llfuse.lock_released:
             if not self._current_user:
                 self._current_user = self.api.users().current().execute(num_retries=self.num_retries)
@@ -1033,6 +1054,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def mkdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         try:
             with llfuse.lock_released:
                 c = {
@@ -1053,6 +1077,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rmdir(self, name):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if name not in self:
             raise llfuse.FUSEError(errno.ENOENT)
         if not isinstance(self[name], CollectionDirectory):
@@ -1066,6 +1093,9 @@ class ProjectDirectory(Directory):
     @use_counter
     @check_update
     def rename(self, name_old, name_new, src):
+        if not self.writable():
+            raise llfuse.FUSEError(errno.EROFS)
+
         if not isinstance(src, ProjectDirectory):
             raise llfuse.FUSEError(errno.EPERM)
 
@@ -1138,9 +1168,9 @@ class ProjectDirectory(Directory):
 class SharedDirectory(Directory):
     """A special directory that represents users or groups who have shared projects with me."""
 
-    def __init__(self, parent_inode, inodes, api, num_retries, exclude,
+    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, exclude,
                  poll=False, poll_time=60, storage_classes=None):
-        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config)
+        super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
         self.api = api
         self.num_retries = num_retries
         self.current_user = api.users().current().execute(num_retries=num_retries)
@@ -1231,7 +1261,8 @@ class SharedDirectory(Directory):
             self.merge(contents.items(),
                        lambda i: i[0],
                        lambda a, i: a.uuid() == i[1]['uuid'],
-                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+                       lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
+                                                  i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
         except Exception:
             _logger.exception("arv-mount shared dir error")
         finally:
diff --git a/services/fuse/arvados_fuse/fusefile.py b/services/fuse/arvados_fuse/fusefile.py
index 116b5462b..45d3db16f 100644
--- a/services/fuse/arvados_fuse/fusefile.py
+++ b/services/fuse/arvados_fuse/fusefile.py
@@ -50,11 +50,12 @@ class File(FreshBase):
 class FuseArvadosFile(File):
     """Wraps a ArvadosFile."""
 
-    __slots__ = ('arvfile',)
+    __slots__ = ('arvfile', '_enable_write')
 
-    def __init__(self, parent_inode, arvfile, _mtime):
+    def __init__(self, parent_inode, arvfile, _mtime, enable_write):
         super(FuseArvadosFile, self).__init__(parent_inode, _mtime)
         self.arvfile = arvfile
+        self._enable_write = enable_write
 
     def size(self):
         with llfuse.lock_released:
@@ -72,7 +73,7 @@ class FuseArvadosFile(File):
         return False
 
     def writable(self):
-        return self.arvfile.writable()
+        return self._enable_write and self.arvfile.writable()
 
     def flush(self):
         with llfuse.lock_released:
diff --git a/services/fuse/tests/mount_test_base.py b/services/fuse/tests/mount_test_base.py
index fe2ff929d..7cf8aa373 100644
--- a/services/fuse/tests/mount_test_base.py
+++ b/services/fuse/tests/mount_test_base.py
@@ -57,12 +57,15 @@ class MountTestBase(unittest.TestCase):
         llfuse.close()
 
     def make_mount(self, root_class, **root_kwargs):
+        enable_write = True
+        if 'enable_write' in root_kwargs:
+            enable_write = root_kwargs.pop('enable_write')
         self.operations = fuse.Operations(
             os.getuid(), os.getgid(),
             api_client=self.api,
-            enable_write=True)
+            enable_write=enable_write)
         self.operations.inodes.add_entry(root_class(
-            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, **root_kwargs))
+            llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, enable_write, **root_kwargs))
         llfuse.init(self.operations, self.mounttmp, [])
         self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
         self.llfuse_thread.daemon = True
diff --git a/services/fuse/tests/test_mount.py b/services/fuse/tests/test_mount.py
index 157f55e4a..ece316193 100644
--- a/services/fuse/tests/test_mount.py
+++ b/services/fuse/tests/test_mount.py
@@ -1113,7 +1113,7 @@ class MagicDirApiError(FuseMagicTest):
 
 class SanitizeFilenameTest(MountTestBase):
     def test_sanitize_filename(self):
-        pdir = fuse.ProjectDirectory(1, {}, self.api, 0, project_object=self.api.users().current().execute())
+        pdir = fuse.ProjectDirectory(1, {}, self.api, 0, False, project_object=self.api.users().current().execute())
         acceptable = [
             "foo.txt",
             ".foo",
@@ -1293,3 +1293,25 @@ class StorageClassesTest(IntegrationTest):
     @staticmethod
     def _test_collection_custom_storage_classes(self, coll):
         self.assertEqual(storage_classes_desired(coll), ['foo'])
+
+def _readonlyCollectionTestHelper(mounttmp):
+    f = open(os.path.join(mounttmp, 'thing1.txt'), 'rt')
+    # Testing that close() doesn't raise an error.
+    f.close()
+
+class ReadonlyCollectionTest(MountTestBase):
+    def setUp(self):
+        super(ReadonlyCollectionTest, self).setUp()
+        cw = arvados.collection.Collection()
+        with cw.open('thing1.txt', 'wt') as f:
+            f.write("data 1")
+        cw.save_new(owner_uuid=run_test_server.fixture("groups")["aproject"]["uuid"])
+        self.testcollection = cw.api_response()
+
+    def runTest(self):
+        settings = arvados.config.settings().copy()
+        settings["ARVADOS_API_TOKEN"] = run_test_server.fixture("api_client_authorizations")["project_viewer"]["api_token"]
+        self.api = arvados.safeapi.ThreadSafeApiCache(settings)
+        self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection, enable_write=False)
+
+        self.pool.apply(_readonlyCollectionTestHelper, (self.mounttmp,))
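From a user's point of view, the read-only gating added above means that mutating operations on a mount created with enable_write=False fail with EROFS while reads keep working. A small sketch (the mount point path is hypothetical):

    import errno

    # Reads still work on a read-only mount...
    with open("/mnt/arvados/thing1.txt", "rt") as f:
        print(f.read())

    # ...but creating a new file goes through Directory.create(), which now
    # raises EROFS when the mount is not writable.
    try:
        with open("/mnt/arvados/new-file.txt", "wt") as f:
            f.write("new data")
    except OSError as e:
        assert e.errno == errno.EROFS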

commit 773413b6decf25e4ab669881e00c507aa8a1486f
Author: Tom Clegg <tom at curii.com>
Date:   Thu Nov 11 15:47:05 2021 -0500

    Merge branch '16817-users-visible-upon-activation'
    
    closes #16817
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index be1103243..dfee7e0b5 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -39,6 +39,10 @@ h2(#main). development main (as of 2021-11-10)
 
 "previous: Upgrading from 2.3.0":#v2_3_0
 
+h3. Users are visible to other users by default
+
+When a new user is set up (either via the @AutoSetupNewUsers@ config or via the Workbench admin interface), the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@.
+
 h3. Dedicated keepstore process for each container
 
 When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.
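For an admin who wants to keep the previous visibility behavior described above, the override in the cluster configuration (key path as added to @config.default.yml@ below; @zzzzz@ is a placeholder cluster ID) would look like:

    Clusters:
      zzzzz:
        Users:
          ActivatedUsersAreVisibleToOthers: false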
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 9971d3cae..bbdbe6ab9 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -265,6 +265,16 @@ Clusters:
       # user agreements.  Should only be enabled for development.
       NewUsersAreActive: false
 
+      # Newly activated users (whether set up by an admin or via
+      # AutoSetupNewUsers) immediately become visible to other active
+      # users.
+      #
+      # On a multi-tenant cluster, where the intent is for users to be
+      # invisible to one another unless they have been added to the
+      # same group(s) via Workbench admin interface, change this to
+      # false.
+      ActivatedUsersAreVisibleToOthers: true
+
       # The e-mail address of the user you would like to become marked as an admin
       # user on their first login.
       AutoAdminUserWithEmail: ""
diff --git a/lib/config/export.go b/lib/config/export.go
index f2c15b0ee..1d2ea6c98 100644
--- a/lib/config/export.go
+++ b/lib/config/export.go
@@ -214,6 +214,7 @@ var whitelist = map[string]bool{
 	"SystemRootToken":                                     false,
 	"TLS":                                                 false,
 	"Users":                                               true,
+	"Users.ActivatedUsersAreVisibleToOthers":              false,
 	"Users.AdminNotifierEmailFrom":                        false,
 	"Users.AnonymousUserToken":                            true,
 	"Users.AutoAdminFirstUser":                            false,
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index 4b4248db6..576eb0c00 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -271,6 +271,16 @@ Clusters:
       # user agreements.  Should only be enabled for development.
       NewUsersAreActive: false
 
+      # Newly activated users (whether set up by an admin or via
+      # AutoSetupNewUsers) immediately become visible to other active
+      # users.
+      #
+      # On a multi-tenant cluster, where the intent is for users to be
+      # invisible to one another unless they have been added to the
+      # same group(s) via Workbench admin interface, change this to
+      # false.
+      ActivatedUsersAreVisibleToOthers: true
+
       # The e-mail address of the user you would like to become marked as an admin
       # user on their first login.
       AutoAdminUserWithEmail: ""
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 1cd002082..dcb81285c 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -223,6 +223,7 @@ type Cluster struct {
 		Insecure    bool
 	}
 	Users struct {
+		ActivatedUsersAreVisibleToOthers      bool
 		AnonymousUserToken                    string
 		AdminNotifierEmailFrom                string
 		AutoAdminFirstUser                    bool
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 366c03e30..febb8ea51 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -234,8 +234,9 @@ SELECT target_uuid, perm_level
                               name: 'can_read').empty?
 
     # Add can_read link from this user to "all users" which makes this
-    # user "invited"
-    group_perm = create_user_group_link
+    # user "invited", and (depending on config) a link in the opposite
+    # direction which makes this user visible to other users.
+    group_perms = add_to_all_users_group
 
     # Add git repo
     repo_perm = if (!repo_name.nil? || Rails.configuration.Users.AutoSetupNewUsersWithRepository) and !username.nil?
@@ -267,7 +268,7 @@ SELECT target_uuid, perm_level
 
     forget_cached_group_perms
 
-    return [repo_perm, vm_login_perm, group_perm, self].compact
+    return [repo_perm, vm_login_perm, *group_perms, self].compact
   end
 
   # delete user signatures, login, repo, and vm perms, and mark as inactive
@@ -728,16 +729,26 @@ SELECT target_uuid, perm_level
     login_perm
   end
 
-  # add the user to the 'All users' group
-  def create_user_group_link
-    return (Link.where(tail_uuid: self.uuid,
+  def add_to_all_users_group
+    resp = [Link.where(tail_uuid: self.uuid,
                        head_uuid: all_users_group_uuid,
                        link_class: 'permission',
-                       name: 'can_read').first or
+                       name: 'can_read').first ||
             Link.create(tail_uuid: self.uuid,
                         head_uuid: all_users_group_uuid,
                         link_class: 'permission',
-                        name: 'can_read'))
+                        name: 'can_read')]
+    if Rails.configuration.Users.ActivatedUsersAreVisibleToOthers
+      resp += [Link.where(tail_uuid: all_users_group_uuid,
+                          head_uuid: self.uuid,
+                          link_class: 'permission',
+                          name: 'can_read').first ||
+               Link.create(tail_uuid: all_users_group_uuid,
+                           head_uuid: self.uuid,
+                           link_class: 'permission',
+                           name: 'can_read')]
+    end
+    return resp
   end
 
   # Give the special "System group" permission to manage this user and
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
index c807a7d6c..ae7b21dec 100644
--- a/services/api/test/functional/arvados/v1/users_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/users_controller_test.rb
@@ -13,6 +13,7 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
     @initial_link_count = Link.count
     @vm_uuid = virtual_machines(:testvm).uuid
     ActionMailer::Base.deliveries = []
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
   end
 
   test "activate a user after signing UA" do
diff --git a/services/api/test/unit/permission_test.rb b/services/api/test/unit/permission_test.rb
index 123031b35..128d0ebaa 100644
--- a/services/api/test/unit/permission_test.rb
+++ b/services/api/test/unit/permission_test.rb
@@ -218,6 +218,7 @@ class PermissionTest < ActiveSupport::TestCase
   end
 
   test "manager user gets permission to minions' articles via can_manage link" do
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
     manager = create :active_user, first_name: "Manage", last_name: "Er"
     minion = create :active_user, first_name: "Min", last_name: "Ion"
     minions_specimen = act_as_user minion do
@@ -314,6 +315,7 @@ class PermissionTest < ActiveSupport::TestCase
   end
 
   test "users with bidirectional read permission in group can see each other, but cannot see each other's private articles" do
+    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false
     a = create :active_user, first_name: "A"
     b = create :active_user, first_name: "B"
     other = create :active_user, first_name: "OTHER"
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
index c00164c0a..7368d8937 100644
--- a/services/api/test/unit/user_test.rb
+++ b/services/api/test/unit/user_test.rb
@@ -447,30 +447,40 @@ class UserTest < ActiveSupport::TestCase
     assert_not_allowed { User.new.save }
   end
 
-  test "setup new user" do
-    set_user_from_auth :admin
+  [true, false].each do |visible|
+    test "setup new user with ActivatedUsersAreVisibleToOthers=#{visible}" do
+      Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = visible
+      set_user_from_auth :admin
 
-    email = 'foo at example.com'
+      email = 'foo at example.com'
 
-    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
+      user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})
 
-    vm = VirtualMachine.create
+      vm = VirtualMachine.create
 
-    response = user.setup(repo_name: 'foo/testrepo',
-                          vm_uuid: vm.uuid)
+      response = user.setup(repo_name: 'foo/testrepo',
+                            vm_uuid: vm.uuid)
 
-    resp_user = find_obj_in_resp response, 'User'
-    verify_user resp_user, email
+      resp_user = find_obj_in_resp response, 'User'
+      verify_user resp_user, email
 
-    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
-    verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
+      group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
+      verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
 
-    repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
-    verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+      group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+      if visible
+        verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+      else
+        assert_nil group_perm2
+      end
 
-    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
-    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
-    assert_equal("foo", vm_perm.properties["username"])
+      repo_perm = find_obj_in_resp response, 'Link', 'arvados#repository'
+      verify_link repo_perm, 'permission', 'can_manage', resp_user[:uuid], nil
+
+      vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'
+      verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid
+      assert_equal("foo", vm_perm.properties["username"])
+    end
   end
 
   test "setup new user with junk in database" do
@@ -514,6 +524,9 @@ class UserTest < ActiveSupport::TestCase
     group_perm = find_obj_in_resp response, 'Link', 'arvados#group'
     verify_link group_perm, 'permission', 'can_read', resp_user[:uuid], nil
 
+    group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'
+    verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil
+
     # invoke setup again with repo_name
     response = user.setup(repo_name: 'foo/testrepo')
     resp_user = find_obj_in_resp response, 'User', nil
@@ -560,7 +573,7 @@ class UserTest < ActiveSupport::TestCase
           break
         end
       else  # looking for a link
-        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind
+        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).andand.kind == head_kind
           return_obj = x
           break
         end

commit 28e35c535b8fd442dce3a286c4503517dc848848
Author: Tom Clegg <tom at curii.com>
Date:   Mon Nov 1 14:49:29 2021 -0400

    12859: Fix unclosed file descriptors in local filesystem driver.
    
    Temporary file was not being closed/removed in the case where client
    disconnection is detected while waiting for the volume-level serialize
    lock.
    
    Also, GetDeviceID was leaking one file descriptor per volume at
    startup time.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index a74616604..f076ccf18 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -135,6 +135,7 @@ func (v *UnixVolume) GetDeviceID() string {
 	if err != nil {
 		return giveup("opening %q: %s", udir, err)
 	}
+	defer d.Close()
 	uuids, err := d.Readdirnames(0)
 	if err != nil {
 		return giveup("reading %q: %s", udir, err)
@@ -274,29 +275,25 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
 		return fmt.Errorf("error creating directory %s: %s", bdir, err)
 	}
 
-	tmpfile, tmperr := v.os.TempFile(bdir, "tmp"+loc)
-	if tmperr != nil {
-		return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, tmperr)
-	}
-
 	bpath := v.blockPath(loc)
+	tmpfile, err := v.os.TempFile(bdir, "tmp"+loc)
+	if err != nil {
+		return fmt.Errorf("TempFile(%s, tmp%s) failed: %s", bdir, loc, err)
+	}
+	defer v.os.Remove(tmpfile.Name())
+	defer tmpfile.Close()
 
-	if err := v.lock(ctx); err != nil {
+	if err = v.lock(ctx); err != nil {
 		return err
 	}
 	defer v.unlock()
 	n, err := io.Copy(tmpfile, rdr)
 	v.os.stats.TickOutBytes(uint64(n))
 	if err != nil {
-		err = fmt.Errorf("error writing %s: %s", bpath, err)
-		tmpfile.Close()
-		v.os.Remove(tmpfile.Name())
-		return err
+		return fmt.Errorf("error writing %s: %s", bpath, err)
 	}
-	if err := tmpfile.Close(); err != nil {
-		err = fmt.Errorf("error closing %s: %s", tmpfile.Name(), err)
-		v.os.Remove(tmpfile.Name())
-		return err
+	if err = tmpfile.Close(); err != nil {
+		return fmt.Errorf("error closing %s: %s", tmpfile.Name(), err)
 	}
 	// ext4 uses a low-precision clock and effectively backdates
 	// files by up to 10 ms, sometimes across a 1-second boundary,
@@ -307,14 +304,10 @@ func (v *UnixVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
 	v.os.stats.TickOps("utimes")
 	v.os.stats.Tick(&v.os.stats.UtimesOps)
 	if err = os.Chtimes(tmpfile.Name(), ts, ts); err != nil {
-		err = fmt.Errorf("error setting timestamps on %s: %s", tmpfile.Name(), err)
-		v.os.Remove(tmpfile.Name())
-		return err
+		return fmt.Errorf("error setting timestamps on %s: %s", tmpfile.Name(), err)
 	}
-	if err := v.os.Rename(tmpfile.Name(), bpath); err != nil {
-		err = fmt.Errorf("error renaming %s to %s: %s", tmpfile.Name(), bpath, err)
-		v.os.Remove(tmpfile.Name())
-		return err
+	if err = v.os.Rename(tmpfile.Name(), bpath); err != nil {
+		return fmt.Errorf("error renaming %s to %s: %s", tmpfile.Name(), bpath, err)
 	}
 	return nil
 }

commit 1dc17e4eee5367c7684888c8dcaa6445b576537c
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Tue Nov 2 10:04:48 2021 -0300

    18318: Updates the nokogiri dependency on API & WB1.
    
    Addresses https://nvd.nist.gov/vuln/detail/CVE-2021-41098
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/apps/workbench/Gemfile.lock b/apps/workbench/Gemfile.lock
index ab9256a38..13c443096 100644
--- a/apps/workbench/Gemfile.lock
+++ b/apps/workbench/Gemfile.lock
@@ -178,7 +178,7 @@ GEM
       mime-types-data (~> 3.2015)
     mime-types-data (3.2019.0331)
     mini_mime (1.1.0)
-    mini_portile2 (2.5.3)
+    mini_portile2 (2.6.1)
     minitest (5.10.3)
     mocha (1.8.0)
       metaclass (~> 0.0.1)
@@ -194,8 +194,8 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.5.7)
-    nokogiri (1.11.7)
-      mini_portile2 (~> 2.5.0)
+    nokogiri (1.12.5)
+      mini_portile2 (~> 2.6.1)
       racc (~> 1.4)
     npm-rails (0.2.1)
       rails (>= 3.2)
@@ -214,7 +214,7 @@ GEM
       multi_json (~> 1.0)
       websocket-driver (>= 0.2.0)
     public_suffix (4.0.6)
-    racc (1.5.2)
+    racc (1.6.0)
     rack (2.2.3)
     rack-mini-profiler (1.0.2)
       rack (>= 1.2.0)
diff --git a/services/api/Gemfile.lock b/services/api/Gemfile.lock
index 6e149d45a..bdf791153 100644
--- a/services/api/Gemfile.lock
+++ b/services/api/Gemfile.lock
@@ -142,7 +142,7 @@ GEM
     metaclass (0.0.4)
     method_source (1.0.0)
     mini_mime (1.1.0)
-    mini_portile2 (2.5.3)
+    mini_portile2 (2.6.1)
     minitest (5.10.3)
     mocha (1.8.0)
       metaclass (~> 0.0.1)
@@ -156,8 +156,8 @@ GEM
     net-ssh-gateway (2.0.0)
       net-ssh (>= 4.0.0)
     nio4r (2.5.7)
-    nokogiri (1.11.7)
-      mini_portile2 (~> 2.5.0)
+    nokogiri (1.12.5)
+      mini_portile2 (~> 2.6.1)
       racc (~> 1.4)
     oj (3.9.2)
     optimist (3.0.0)
@@ -168,7 +168,7 @@ GEM
     pg (1.1.4)
     power_assert (1.1.4)
     public_suffix (4.0.6)
-    racc (1.5.2)
+    racc (1.6.0)
     rack (2.2.3)
     rack-test (1.1.0)
       rack (>= 1.0, < 3)

commit bc9d8d1e4caeef8c4b2da02f9a134fc7b57148d7
Author: Ward Vandewege <ward at curii.com>
Date:   Fri Oct 29 11:29:34 2021 -0400

    18309: remove faraday dependency in the arvados-login-sync gem, instead
           depend on the correct version of the arvados-google-api-client
           gem.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/login-sync/arvados-login-sync.gemspec b/services/login-sync/arvados-login-sync.gemspec
index 826b4607e..f7fe4bc16 100644
--- a/services/login-sync/arvados-login-sync.gemspec
+++ b/services/login-sync/arvados-login-sync.gemspec
@@ -39,8 +39,8 @@ Gem::Specification.new do |s|
   s.required_ruby_version = '>= 2.1.0'
   s.add_runtime_dependency 'arvados', '>= 1.3.3.20190320201707'
   s.add_runtime_dependency 'launchy', '< 2.5'
-  # arvados-google-api-client 0.8.7.2 is incompatible with faraday 0.16.2
-  s.add_dependency('faraday', '< 0.16')
+  # We need at least version 0.8.7.3, cf. https://dev.arvados.org/issues/15673
+  s.add_dependency('arvados-google-api-client', '>= 0.8.7.3', '< 0.8.9')
   # arvados-google-api-client (and thus arvados) gems
   # depend on signet, but signet 0.12 is incompatible with ruby 2.3.
   s.add_dependency('signet', '< 0.12')

commit ac92153c3aa05af1755b1afe225d3355fcca160d
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 28 19:47:05 2021 -0400

    Controller test fix.
    
    refs #18183
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/lib/controller/integration_test.go b/lib/controller/integration_test.go
index f2f88eb25..4cf6a6832 100644
--- a/lib/controller/integration_test.go
+++ b/lib/controller/integration_test.go
@@ -831,11 +831,9 @@ func (s *IntegrationSuite) TestListUsers(c *check.C) {
 	}
 	c.Check(found, check.Equals, true)
 
-	// Deactivated user can see is_active==false via "get current
-	// user" API
+	// Deactivated user no longer has working token
 	user1, err = conn3.UserGetCurrent(userctx1, arvados.GetOptions{})
-	c.Assert(err, check.IsNil)
-	c.Check(user1.IsActive, check.Equals, false)
+	c.Assert(err, check.ErrorMatches, `.*401 Unauthorized.*`)
 }
 
 func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {

commit 73d113eab7fef74d9519be5236e89b48aeb2eab2
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 28 14:10:40 2021 -0400

    Another rails test fix.
    
    refs #18183
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/api/test/integration/users_test.rb b/services/api/test/integration/users_test.rb
index 81168e15b..f3e787e3d 100644
--- a/services/api/test/integration/users_test.rb
+++ b/services/api/test/integration/users_test.rb
@@ -434,20 +434,26 @@ class UsersTest < ActionDispatch::IntegrationTest
         params: {},
         headers: {"HTTP_AUTHORIZATION" => "Bearer #{token}"})
     assert_response(:success)
-    user = json_response
-    assert_equal true, user['is_active']
+    userJSON = json_response
+    assert_equal true, userJSON['is_active']
 
     post("/arvados/v1/users/#{user['uuid']}/unsetup",
         params: {},
         headers: auth(:admin))
     assert_response :success
 
+    # Need to get a new token, the old one was invalidated by the unsetup call
+    act_as_system_user do
+      ap = ApiClientAuthorization.create!(user: user, api_client_id: 0)
+      token = ap.api_token
+    end
+
     get("/arvados/v1/users/#{user['uuid']}",
         params: {},
         headers: {"HTTP_AUTHORIZATION" => "Bearer #{token}"})
     assert_response(:success)
-    user = json_response
-    assert_equal false, user['is_active']
+    userJSON = json_response
+    assert_equal false, userJSON['is_active']
 
     post("/arvados/v1/users/#{user['uuid']}/activate",
         params: {},

commit dcd9dd1ec965190dcece4b8ef3f9379776a309e8
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 28 12:48:02 2021 -0400

    Update the test db structure.
    
    refs #18183
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/api/db/structure.sql b/services/api/db/structure.sql
index 2f7748335..da9959593 100644
--- a/services/api/db/structure.sql
+++ b/services/api/db/structure.sql
@@ -3146,6 +3146,7 @@ INSERT INTO "schema_migrations" (version) VALUES
 ('20210108033940'),
 ('20210126183521'),
 ('20210621204455'),
-('20210816191509');
+('20210816191509'),
+('20211027154300');
 
 

commit 7dd7f8d08b1bbf4692b2f1678d78047489b6fd37
Author: Ward Vandewege <ward at curii.com>
Date:   Wed Oct 27 15:48:54 2021 -0400

    18183: add a database migration that deletes tokens and ssh keys that
           belong to inactive users.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/api/db/migrate/20211027154300_delete_disabled_user_tokens_and_keys.rb b/services/api/db/migrate/20211027154300_delete_disabled_user_tokens_and_keys.rb
new file mode 100644
index 000000000..df3db6f5f
--- /dev/null
+++ b/services/api/db/migrate/20211027154300_delete_disabled_user_tokens_and_keys.rb
@@ -0,0 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+class DeleteDisabledUserTokensAndKeys < ActiveRecord::Migration[5.2]
+  def up
+    execute "delete from api_client_authorizations where user_id in (select id from users where is_active ='false' and uuid not like '%-tpzed-anonymouspublic' and uuid not like '%-tpzed-000000000000000')"
+    execute "delete from authorized_keys where owner_uuid in (select uuid from users where is_active ='false' and uuid not like '%-tpzed-anonymouspublic' and uuid not like '%-tpzed-000000000000000')"
+    execute "delete from authorized_keys where authorized_user_uuid in (select uuid from users where is_active ='false' and uuid not like '%-tpzed-anonymouspublic' and uuid not like '%-tpzed-000000000000000')"
+  end
+
+  def down
+    # This migration is not reversible.
+  end
+end

commit a6f94a674bdbb99cc3fb19cff6a7ffbf4c3520ee
Author: Ward Vandewege <ward at curii.com>
Date:   Wed Oct 27 15:05:00 2021 -0400

    18183: When the user unsetup api endpoint is hit, any tokens owned by
           the user should be deleted.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 2e862d3ae..366c03e30 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -300,6 +300,12 @@ SELECT target_uuid, perm_level
     Link.where(link_class: 'signature',
                      tail_uuid: self.uuid).destroy_all
 
+    # delete tokens for this user
+    ApiClientAuthorization.where(user_id: self.id).destroy_all
+    # delete ssh keys for this user
+    AuthorizedKey.where(owner_uuid: self.uuid).destroy_all
+    AuthorizedKey.where(authorized_user_uuid: self.uuid).destroy_all
+
     # delete user preferences (including profile)
     self.prefs = {}
 
diff --git a/services/api/test/integration/users_test.rb b/services/api/test/integration/users_test.rb
index b24ddc5a5..81168e15b 100644
--- a/services/api/test/integration/users_test.rb
+++ b/services/api/test/integration/users_test.rb
@@ -198,6 +198,13 @@ class UsersTest < ActionDispatch::IntegrationTest
 
     verify_link_existence created['uuid'], created['email'], true, true, true, true, false
 
+    # create a token
+    token = act_as_system_user do
+      ApiClientAuthorization.create!(user: User.find_by_uuid(created['uuid']), api_client: ApiClient.all.first).api_token
+    end
+
+    assert_equal 1, ApiClientAuthorization.where(user_id: User.find_by_uuid(created['uuid']).id).size, 'expected token not found'
+
     post "/arvados/v1/users/#{created['uuid']}/unsetup", params: {}, headers: auth(:admin)
 
     assert_response :success
@@ -205,6 +212,7 @@ class UsersTest < ActionDispatch::IntegrationTest
     created2 = json_response
     assert_not_nil created2['uuid'], 'expected uuid for the newly created user'
     assert_equal created['uuid'], created2['uuid'], 'expected uuid not found'
+    assert_equal 0, ApiClientAuthorization.where(user_id: User.find_by_uuid(created['uuid']).id).size, 'token should have been deleted by user unsetup'
 
     verify_link_existence created['uuid'], created['email'], false, false, false, false, false
   end

commit 5c4316723fda70348f841a3ad1a7d8385f9e3c4a
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Thu Nov 11 14:51:31 2021 -0300

    Merge branch '17944-backend-vocabulary-validation-rebased' into main.
    
    Refs #17944
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/.gitignore b/.gitignore
index beb84b3c2..231424acc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,5 +32,6 @@ services/api/config/arvados-clients.yml
 .Rproj.user
 _version.py
 *.bak
+*.log
 arvados-snakeoil-ca.pem
 .vagrant
diff --git a/doc/_config.yml b/doc/_config.yml
index 31db9c41d..dde87323d 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -194,7 +194,7 @@ navbar:
       - admin/keep-balance.html.textile.liquid
       - admin/controlling-container-reuse.html.textile.liquid
       - admin/logs-table-management.html.textile.liquid
-      - admin/workbench2-vocabulary.html.textile.liquid
+      - admin/metadata-vocabulary.html.textile.liquid
       - admin/storage-classes.html.textile.liquid
       - admin/keep-recovering-data.html.textile.liquid
       - admin/keep-measuring-deduplication.html.textile.liquid
diff --git a/doc/_includes/_wb2_vocabulary_example.liquid b/doc/_includes/_metadata_vocabulary_example.liquid
similarity index 90%
rename from doc/_includes/_wb2_vocabulary_example.liquid
rename to doc/_includes/_metadata_vocabulary_example.liquid
index ee2ac97ef..fb8e57725 100644
--- a/doc/_includes/_wb2_vocabulary_example.liquid
+++ b/doc/_includes/_metadata_vocabulary_example.liquid
@@ -1,4 +1,8 @@
-{
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}{
     "strict_tags": false,
     "tags": {
         "IDTAGANIMALS": {
diff --git a/doc/admin/workbench2-vocabulary.html.textile.liquid b/doc/admin/metadata-vocabulary.html.textile.liquid
similarity index 75%
rename from doc/admin/workbench2-vocabulary.html.textile.liquid
rename to doc/admin/metadata-vocabulary.html.textile.liquid
index 9a8d7fcd0..170699ab6 100644
--- a/doc/admin/workbench2-vocabulary.html.textile.liquid
+++ b/doc/admin/metadata-vocabulary.html.textile.liquid
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: admin
-title: User properties vocabulary
+title: Metadata vocabulary
 ...
 
 {% comment %}
@@ -12,17 +12,19 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Many Arvados objects (like collections and projects) can store metadata as properties that in turn can be used in searches allowing a flexible way of organizing data inside the system.
 
-The Workbench2 user interface enables the site adminitrator to set up a properties vocabulary formal definition so that users can select from predefined key/value pairs of properties, offering the possibility to add different terms for the same concept.
+Arvados enables the site administrator to set up a formal metadata vocabulary definition so that users can select from predefined key/value pairs of properties, making it possible to offer different terms for the same concept in client UIs such as Workbench2.
 
-h2. Workbench2 configuration
+The Controller service loads and caches the configured vocabulary file in memory at startup time and exports it on a dedicated endpoint. It periodically checks the local copy for updates and refreshes its cache if validation passes.
 
-Workbench2 retrieves the vocabulary file URL from the cluster config as shown:
+h2. Configuration
+
+The site administrator should place the JSON vocabulary file on the same host as the controller service and set up the config file as follows:
 
 <notextile>
 <pre><code>Cluster:
   zzzzz:
-    Workbench:
-      VocabularyURL: <span class="userinput">https://site.example.com/vocabulary.json</span>
+    API:
+      VocabularyPath: <span class="userinput">/etc/arvados/vocabulary.json</span>
 </code></pre>
 </notextile>
 
@@ -35,10 +37,12 @@ Keys and values are indexed by identifiers so that the concept of a term is pres
 The following is an example of a vocabulary definition:
 
 {% codeblock as json %}
-{% include 'wb2_vocabulary_example' %}
+{% include 'metadata_vocabulary_example' %}
 {% endcodeblock %}
 
-If the @strict_tags@ flag at the root level is @true@, it will restrict the users from saving property keys other than the ones defined in the vocabulary. Take notice that this restriction is at the client level on Workbench2, it doesn't limit the user's ability to set any arbitrary property via other means (e.g. Python SDK or CLI commands)
+For clients to be able to query the vocabulary definition, a special endpoint is exposed on the @controller@ service: @/arvados/v1/vocabulary@. This endpoint doesn't require authentication and returns the vocabulary definition in JSON format.
+
+If the @strict_tags@ flag at the root level is @true@, users are restricted from saving property keys other than the ones defined in the vocabulary. This restriction is enforced at the backend level to ensure consistency across different clients.
 
 Inside the @tags@ member, IDs are defined (@IDTAGANIMALS@, @IDTAGCOMMENT@, @IDTAGIMPORTANCES@) and can have any format that the current application requires. Every key will declare at least a @labels@ list with zero or more label objects.
 
diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index 5a5154ce5..be1103243 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -35,6 +35,23 @@ TODO: extract this information based on git commit messages and generate changel
 <div class="releasenotes">
 </notextile>
 
+h2(#main). development main (as of 2021-11-10)
+
+"previous: Upgrading from 2.3.0":#v2_3_0
+
+h3. Dedicated keepstore process for each container
+
+When Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.
+* If you have containers that can effectively handle multiple I/O threads per VCPU, consider increasing the @Containers.LocalKeepBlobBuffersPerVCPU@ value.
+* If you already have a robust permanent keepstore infrastructure, you can set @Containers.LocalKeepBlobBuffersPerVCPU@ to 0 to disable this feature and preserve the previous behavior of sending container I/O traffic to your separately provisioned keepstore servers.
+* This feature is enabled only if no volumes use @AccessViaHosts@, and no volumes have underlying @Replication@ less than @Collections.DefaultReplication@. If the feature is configured but cannot be enabled due to an incompatible volume configuration, this will be noted in the @crunch-run.txt@ file in the container log.
+
+h3. Backend support for vocabulary checking
+
+If your installation uses the vocabulary feature on Workbench2, you will need to update the cluster configuration by moving the vocabulary definition file to the node where @controller@ runs, and setting the @API.VocabularyPath@ configuration parameter to the local path where the file was placed.
+This enables vocabulary checking cluster-wide, including on Workbench2. The @Workbench.VocabularyURL@ configuration parameter is deprecated and will be removed in a future release.
+You can read more about how this feature works on the "admin page":{{site.baseurl}}/admin/metadata-vocabulary.html.
+
 h2(#v2_3_0). v2.3.0 (2021-10-27)
 
 "previous: Upgrading from 2.2.0":#v2_2_0
@@ -281,7 +298,7 @@ Workbench 2 is now ready for regular use.  Follow the instructions to "install w
 
 h3. New property vocabulary format for Workbench2
 
-(feature "#14151":https://dev.arvados.org/issues/14151) Workbench2 supports a new vocabulary format and it isn't compatible with the previous one, please read the "workbench2 vocabulary format admin page":{{site.baseurl}}/admin/workbench2-vocabulary.html for more information.
+(feature "#14151":https://dev.arvados.org/issues/14151) Workbench2 supports a new vocabulary format and it isn't compatible with the previous one, please read the "metadata vocabulary format admin page":{{site.baseurl}}/admin/metadata-vocabulary.html for more information.
 
 h3. Cloud installations only: node manager replaced by arvados-dispatch-cloud
 
diff --git a/doc/install/install-workbench2-app.html.textile.liquid b/doc/install/install-workbench2-app.html.textile.liquid
index f3a320b10..c9a1c7012 100644
--- a/doc/install/install-workbench2-app.html.textile.liquid
+++ b/doc/install/install-workbench2-app.html.textile.liquid
@@ -75,7 +75,7 @@ server {
 
 h2. Vocabulary configuration (optional)
 
-Workbench2 can load a vocabulary file which lists available metadata properties for groups and collections.  To configure the property vocabulary definition, please visit the "Workbench2 Vocabulary Format":{{site.baseurl}}/admin/workbench2-vocabulary.html page in the Admin section.
+Workbench2 can load a vocabulary file which lists available metadata properties for groups and collections.  To configure the property vocabulary definition, please visit the "Metadata Vocabulary Format":{{site.baseurl}}/admin/metadata-vocabulary.html page in the Admin section.
 
 {% assign arvados_component = 'arvados-workbench2' %}
 
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 52e35d83f..9971d3cae 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -234,6 +234,12 @@ Clusters:
       # Timeout on requests to internal Keep services.
       KeepServiceRequestTimeout: 15s
 
+      # Vocabulary file path, local to the node running the controller.
+      # This JSON file should contain the description of what's allowed
+      # as objects' metadata. Its format is described at:
+      # https://doc.arvados.org/admin/metadata-vocabulary.html
+      VocabularyPath: ""
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # this users will be able to self-activate.  Enable this if you want
@@ -1530,7 +1536,6 @@ Clusters:
       DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
 
       # Workbench2 configs
-      VocabularyURL: ""
       FileViewersConfigURL: ""
 
       # Idle time after which the user's session will be auto closed.
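
For reference, a minimal sketch (not part of the patch) of reading the new setting from a loaded cluster configuration, using the same config loader the tests later in this commit rely on; it assumes the default config search path and trims error handling:

package main

import (
	"context"
	"fmt"

	"git.arvados.org/arvados.git/lib/config"
	"git.arvados.org/arvados.git/sdk/go/ctxlog"
)

func main() {
	logger := ctxlog.FromContext(context.Background())
	// Load the cluster configuration from its default location.
	cfg, err := config.NewLoader(nil, logger).Load()
	if err != nil {
		logger.Fatal(err)
	}
	cluster, err := cfg.GetCluster("")
	if err != nil {
		logger.Fatal(err)
	}
	fmt.Println("API.VocabularyPath =", cluster.API.VocabularyPath)
}
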
diff --git a/lib/config/export.go b/lib/config/export.go
index 92e2d7b4d..f2c15b0ee 100644
--- a/lib/config/export.go
+++ b/lib/config/export.go
@@ -72,6 +72,7 @@ var whitelist = map[string]bool{
 	"API.MaxTokenLifetime":                                false,
 	"API.RequestTimeout":                                  true,
 	"API.SendTimeout":                                     true,
+	"API.VocabularyPath":                                  false,
 	"API.WebsocketClientEventQueue":                       false,
 	"API.WebsocketServerEventQueue":                       false,
 	"AuditLogs":                                           false,
@@ -274,7 +275,6 @@ var whitelist = map[string]bool{
 	"Workbench.UserProfileFormFields.*.*":                 true,
 	"Workbench.UserProfileFormFields.*.*.*":               true,
 	"Workbench.UserProfileFormMessage":                    true,
-	"Workbench.VocabularyURL":                             true,
 	"Workbench.WelcomePageHTML":                           true,
 }
 
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index c58bbe178..4b4248db6 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -240,6 +240,12 @@ Clusters:
       # Timeout on requests to internal Keep services.
       KeepServiceRequestTimeout: 15s
 
+      # Vocabulary file path, local to the node running the controller.
+      # This JSON file should contain the description of what's allowed
+      # as objects' metadata. Its format is described at:
+      # https://doc.arvados.org/admin/metadata-vocabulary.html
+      VocabularyPath: ""
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # this users will be able to self-activate.  Enable this if you want
@@ -1536,7 +1542,6 @@ Clusters:
       DefaultOpenIdPrefix: "https://www.google.com/accounts/o8/id"
 
       # Workbench2 configs
-      VocabularyURL: ""
       FileViewersConfigURL: ""
 
       # Idle time after which the user's session will be auto closed.
diff --git a/lib/controller/federation.go b/lib/controller/federation.go
index 144d41c21..cd69727ec 100644
--- a/lib/controller/federation.go
+++ b/lib/controller/federation.go
@@ -121,8 +121,6 @@ func (h *Handler) setupProxyRemoteCluster(next http.Handler) http.Handler {
 
 		mux.ServeHTTP(w, req)
 	})
-
-	return mux
 }
 
 type CurrentUser struct {
diff --git a/lib/controller/federation/conn.go b/lib/controller/federation/conn.go
index 39e4f2676..d1bf473d7 100644
--- a/lib/controller/federation/conn.go
+++ b/lib/controller/federation/conn.go
@@ -22,6 +22,7 @@ import (
 	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/auth"
 	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	"git.arvados.org/arvados.git/sdk/go/health"
 )
 
 type Conn struct {
@@ -30,7 +31,7 @@ type Conn struct {
 	remotes map[string]backend
 }
 
-func New(cluster *arvados.Cluster) *Conn {
+func New(cluster *arvados.Cluster, healthFuncs *map[string]health.Func) *Conn {
 	local := localdb.NewConn(cluster)
 	remotes := map[string]backend{}
 	for id, remote := range cluster.RemoteClusters {
@@ -44,6 +45,11 @@ func New(cluster *arvados.Cluster) *Conn {
 		remotes[id] = conn
 	}
 
+	if healthFuncs != nil {
+		hf := map[string]health.Func{"vocabulary": local.LastVocabularyError}
+		*healthFuncs = hf
+	}
+
 	return &Conn{
 		cluster: cluster,
 		local:   local,
@@ -202,6 +208,10 @@ func (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {
 	return json.RawMessage(buf.Bytes()), err
 }
 
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+	return conn.chooseBackend(conn.cluster.ClusterID).VocabularyGet(ctx)
+}
+
 func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
 	if id := conn.cluster.Login.LoginCluster; id != "" && id != conn.cluster.ClusterID {
 		// defer entire login procedure to designated cluster
@@ -475,6 +485,26 @@ func (conn *Conn) GroupUntrash(ctx context.Context, options arvados.UntrashOptio
 	return conn.chooseBackend(options.UUID).GroupUntrash(ctx, options)
 }
 
+func (conn *Conn) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+	return conn.chooseBackend(options.ClusterID).LinkCreate(ctx, options)
+}
+
+func (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+	return conn.chooseBackend(options.UUID).LinkUpdate(ctx, options)
+}
+
+func (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+	return conn.chooseBackend(options.UUID).LinkGet(ctx, options)
+}
+
+func (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+	return conn.generated_LinkList(ctx, options)
+}
+
+func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+	return conn.chooseBackend(options.UUID).LinkDelete(ctx, options)
+}
+
 func (conn *Conn) SpecimenList(ctx context.Context, options arvados.ListOptions) (arvados.SpecimenList, error) {
 	return conn.generated_SpecimenList(ctx, options)
 }
diff --git a/lib/controller/federation/federation_test.go b/lib/controller/federation/federation_test.go
index 984d32dc3..5460e938a 100644
--- a/lib/controller/federation/federation_test.go
+++ b/lib/controller/federation/federation_test.go
@@ -70,7 +70,7 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
 	ctx = ctrlctx.NewWithTransaction(ctx, s.tx)
 	s.ctx = ctx
 
-	s.fed = New(s.cluster)
+	s.fed = New(s.cluster, nil)
 }
 
 func (s *FederationSuite) TearDownTest(c *check.C) {
diff --git a/lib/controller/federation/generate.go b/lib/controller/federation/generate.go
index 06a5ce12d..b49e138ce 100644
--- a/lib/controller/federation/generate.go
+++ b/lib/controller/federation/generate.go
@@ -53,7 +53,7 @@ func main() {
 		defer out.Close()
 		out.Write(regexp.MustCompile(`(?ms)^.*package .*?import.*?\n\)\n`).Find(buf))
 		io.WriteString(out, "//\n// -- this file is auto-generated -- do not edit -- edit list.go and run \"go generate\" instead --\n//\n\n")
-		for _, t := range []string{"Container", "ContainerRequest", "Group", "Specimen", "User"} {
+		for _, t := range []string{"Container", "ContainerRequest", "Group", "Specimen", "User", "Link"} {
 			_, err := out.Write(bytes.ReplaceAll(orig, []byte("Collection"), []byte(t)))
 			if err != nil {
 				panic(err)
diff --git a/lib/controller/federation/generated.go b/lib/controller/federation/generated.go
index 49a2e5b75..e8a5a08ff 100755
--- a/lib/controller/federation/generated.go
+++ b/lib/controller/federation/generated.go
@@ -221,3 +221,44 @@ func (conn *Conn) generated_UserList(ctx context.Context, options arvados.ListOp
 	}
 	return merged, err
 }
+
+func (conn *Conn) generated_LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+	var mtx sync.Mutex
+	var merged arvados.LinkList
+	var needSort atomic.Value
+	needSort.Store(false)
+	err := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {
+		options.ForwardedFor = conn.cluster.ClusterID + "-" + options.ForwardedFor
+		cl, err := backend.LinkList(ctx, options)
+		if err != nil {
+			return nil, err
+		}
+		mtx.Lock()
+		defer mtx.Unlock()
+		if len(merged.Items) == 0 {
+			merged = cl
+		} else if len(cl.Items) > 0 {
+			merged.Items = append(merged.Items, cl.Items...)
+			needSort.Store(true)
+		}
+		uuids := make([]string, 0, len(cl.Items))
+		for _, item := range cl.Items {
+			uuids = append(uuids, item.UUID)
+		}
+		return uuids, nil
+	})
+	if needSort.Load().(bool) {
+		// Apply the default/implied order, "modified_at desc"
+		sort.Slice(merged.Items, func(i, j int) bool {
+			mi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt
+			return mj.Before(mi)
+		})
+	}
+	if merged.Items == nil {
+		// Return empty results as [], not null
+		// (https://github.com/golang/go/issues/27589 might be
+		// a better solution in the future)
+		merged.Items = []arvados.Link{}
+	}
+	return merged, err
+}
diff --git a/lib/controller/federation/login_test.go b/lib/controller/federation/login_test.go
index 5353ebf0f..c05ebfce6 100644
--- a/lib/controller/federation/login_test.go
+++ b/lib/controller/federation/login_test.go
@@ -47,7 +47,7 @@ func (s *LoginSuite) TestLogout(c *check.C) {
 	s.cluster.Login.LoginCluster = "zhome"
 	// s.fed is already set by SetUpTest, but we need to
 	// reinitialize with the above config changes.
-	s.fed = New(s.cluster)
+	s.fed = New(s.cluster, nil)
 
 	returnTo := "https://app.example.com/foo?bar"
 	for _, trial := range []struct {
diff --git a/lib/controller/federation/user_test.go b/lib/controller/federation/user_test.go
index 2812c1f41..064f8ce5d 100644
--- a/lib/controller/federation/user_test.go
+++ b/lib/controller/federation/user_test.go
@@ -30,7 +30,7 @@ type UserSuite struct {
 func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
 	s.cluster.ClusterID = "local"
 	s.cluster.Login.LoginCluster = "zzzzz"
-	s.fed = New(s.cluster)
+	s.fed = New(s.cluster, nil)
 	s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
 
 	for _, updateFail := range []bool{false, true} {
@@ -120,7 +120,7 @@ func (s *UserSuite) TestLoginClusterUserList(c *check.C) {
 func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
 	s.cluster.ClusterID = "local"
 	s.cluster.Login.LoginCluster = "zzzzz"
-	s.fed = New(s.cluster)
+	s.fed = New(s.cluster, nil)
 	s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")}, true, rpc.PassthroughTokenProvider))
 
 	opts := arvados.GetOptions{UUID: "zzzzz-tpzed-xurymjxw79nv3jz", Select: []string{"uuid", "email"}}
@@ -174,7 +174,7 @@ func (s *UserSuite) TestLoginClusterUserGet(c *check.C) {
 func (s *UserSuite) TestLoginClusterUserListBypassFederation(c *check.C) {
 	s.cluster.ClusterID = "local"
 	s.cluster.Login.LoginCluster = "zzzzz"
-	s.fed = New(s.cluster)
+	s.fed = New(s.cluster, nil)
 	s.addDirectRemote(c, "zzzzz", rpc.NewConn("zzzzz", &url.URL{Scheme: "https", Host: os.Getenv("ARVADOS_API_HOST")},
 		true, rpc.PassthroughTokenProvider))
 
diff --git a/lib/controller/handler.go b/lib/controller/handler.go
index a35d00301..b51d90911 100644
--- a/lib/controller/handler.go
+++ b/lib/controller/handler.go
@@ -9,6 +9,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"strings"
 	"sync"
@@ -74,7 +75,21 @@ func (h *Handler) CheckHealth() error {
 		return err
 	}
 	_, _, err = railsproxy.FindRailsAPI(h.Cluster)
-	return err
+	if err != nil {
+		return err
+	}
+	if h.Cluster.API.VocabularyPath != "" {
+		req, err := http.NewRequest("GET", "/arvados/v1/vocabulary", nil)
+		if err != nil {
+			return err
+		}
+		var resp httptest.ResponseRecorder
+		h.handlerStack.ServeHTTP(&resp, req)
+		if resp.Result().StatusCode != http.StatusOK {
+			return fmt.Errorf("%d %s", resp.Result().StatusCode, resp.Result().Status)
+		}
+	}
+	return nil
 }
 
 func (h *Handler) Done() <-chan struct{} {
@@ -85,18 +100,25 @@ func neverRedirect(*http.Request, []*http.Request) error { return http.ErrUseLas
 
 func (h *Handler) setup() {
 	mux := http.NewServeMux()
-	mux.Handle("/_health/", &health.Handler{
-		Token:  h.Cluster.ManagementToken,
-		Prefix: "/_health/",
-		Routes: health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }},
-	})
+	healthFuncs := make(map[string]health.Func)
 
 	oidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.db)
-	rtr := router.New(federation.New(h.Cluster), router.Config{
+	rtr := router.New(federation.New(h.Cluster, &healthFuncs), router.Config{
 		MaxRequestSize: h.Cluster.API.MaxRequestSize,
 		WrapCalls:      api.ComposeWrappers(ctrlctx.WrapCallsInTransactions(h.db), oidcAuthorizer.WrapCalls),
 	})
+
+	healthRoutes := health.Routes{"ping": func() error { _, err := h.db(context.TODO()); return err }}
+	for name, f := range healthFuncs {
+		healthRoutes[name] = f
+	}
+	mux.Handle("/_health/", &health.Handler{
+		Token:  h.Cluster.ManagementToken,
+		Prefix: "/_health/",
+		Routes: healthRoutes,
+	})
 	mux.Handle("/arvados/v1/config", rtr)
+	mux.Handle("/arvados/v1/vocabulary", rtr)
 	mux.Handle("/"+arvados.EndpointUserAuthenticate.Path, rtr) // must come before .../users/
 	mux.Handle("/arvados/v1/collections", rtr)
 	mux.Handle("/arvados/v1/collections/", rtr)
@@ -107,6 +129,8 @@ func (h *Handler) setup() {
 	mux.Handle("/arvados/v1/container_requests/", rtr)
 	mux.Handle("/arvados/v1/groups", rtr)
 	mux.Handle("/arvados/v1/groups/", rtr)
+	mux.Handle("/arvados/v1/links", rtr)
+	mux.Handle("/arvados/v1/links/", rtr)
 	mux.Handle("/login", rtr)
 	mux.Handle("/logout", rtr)
 
diff --git a/lib/controller/handler_test.go b/lib/controller/handler_test.go
index 9b71c349a..f854079f9 100644
--- a/lib/controller/handler_test.go
+++ b/lib/controller/handler_test.go
@@ -88,6 +88,104 @@ func (s *HandlerSuite) TestConfigExport(c *check.C) {
 	}
 }
 
+func (s *HandlerSuite) TestVocabularyExport(c *check.C) {
+	voc := `{
+		"strict_tags": false,
+		"tags": {
+			"IDTAGIMPORTANCE": {
+				"strict": false,
+				"labels": [{"label": "Importance"}],
+				"values": {
+					"HIGH": {
+						"labels": [{"label": "High"}]
+					},
+					"LOW": {
+						"labels": [{"label": "Low"}]
+					}
+				}
+			}
+		}
+	}`
+	f, err := os.CreateTemp("", "test-vocabulary-*.json")
+	c.Assert(err, check.IsNil)
+	defer os.Remove(f.Name())
+	_, err = f.WriteString(voc)
+	c.Assert(err, check.IsNil)
+	f.Close()
+	s.cluster.API.VocabularyPath = f.Name()
+	for _, method := range []string{"GET", "OPTIONS"} {
+		c.Log(c.TestName()+" ", method)
+		req := httptest.NewRequest(method, "/arvados/v1/vocabulary", nil)
+		resp := httptest.NewRecorder()
+		s.handler.ServeHTTP(resp, req)
+		c.Log(resp.Body.String())
+		if !c.Check(resp.Code, check.Equals, http.StatusOK) {
+			continue
+		}
+		c.Check(resp.Header().Get("Access-Control-Allow-Origin"), check.Equals, `*`)
+		c.Check(resp.Header().Get("Access-Control-Allow-Methods"), check.Matches, `.*\bGET\b.*`)
+		c.Check(resp.Header().Get("Access-Control-Allow-Headers"), check.Matches, `.+`)
+		if method == "OPTIONS" {
+			c.Check(resp.Body.String(), check.HasLen, 0)
+			continue
+		}
+		var expectedVoc, receivedVoc *arvados.Vocabulary
+		err := json.Unmarshal([]byte(voc), &expectedVoc)
+		c.Check(err, check.IsNil)
+		err = json.Unmarshal(resp.Body.Bytes(), &receivedVoc)
+		c.Check(err, check.IsNil)
+		c.Check(receivedVoc, check.DeepEquals, expectedVoc)
+	}
+}
+
+func (s *HandlerSuite) TestVocabularyFailedCheckStatus(c *check.C) {
+	voc := `{
+		"strict_tags": false,
+		"tags": {
+			"IDTAGIMPORTANCE": {
+				"strict": true,
+				"labels": [{"label": "Importance"}],
+				"values": {
+					"HIGH": {
+						"labels": [{"label": "High"}]
+					},
+					"LOW": {
+						"labels": [{"label": "Low"}]
+					}
+				}
+			}
+		}
+	}`
+	f, err := os.CreateTemp("", "test-vocabulary-*.json")
+	c.Assert(err, check.IsNil)
+	defer os.Remove(f.Name())
+	_, err = f.WriteString(voc)
+	c.Assert(err, check.IsNil)
+	f.Close()
+	s.cluster.API.VocabularyPath = f.Name()
+
+	req := httptest.NewRequest("POST", "/arvados/v1/collections",
+		strings.NewReader(`{
+			"collection": {
+				"properties": {
+					"IDTAGIMPORTANCE": "Critical"
+				}
+			}
+		}`))
+	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+	req.Header.Set("Content-type", "application/json")
+
+	resp := httptest.NewRecorder()
+	s.handler.ServeHTTP(resp, req)
+	c.Log(resp.Body.String())
+	c.Assert(resp.Code, check.Equals, http.StatusBadRequest)
+	var jresp httpserver.ErrorResponse
+	err = json.Unmarshal(resp.Body.Bytes(), &jresp)
+	c.Check(err, check.IsNil)
+	c.Assert(len(jresp.Errors), check.Equals, 1)
+	c.Check(jresp.Errors[0], check.Matches, `.*tag value.*is not valid for key.*`)
+}
+
 func (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {
 	req := httptest.NewRequest("GET", "/discovery/v1/apis/arvados/v1/rest", nil)
 	resp := httptest.NewRecorder()
@@ -245,7 +343,7 @@ func (s *HandlerSuite) CheckObjectType(c *check.C, url string, token string, ski
 	resp := httptest.NewRecorder()
 	s.handler.ServeHTTP(resp, req)
 	c.Assert(resp.Code, check.Equals, http.StatusOK,
-		check.Commentf("Wasn't able to get data from the controller at %q", url))
+		check.Commentf("Wasn't able to get data from the controller at %q: %q", url, resp.Body.String()))
 	err = json.Unmarshal(resp.Body.Bytes(), &proxied)
 	c.Check(err, check.Equals, nil)
 
diff --git a/lib/controller/localdb/collection.go b/lib/controller/localdb/collection.go
index d81dd812b..96c89252e 100644
--- a/lib/controller/localdb/collection.go
+++ b/lib/controller/localdb/collection.go
@@ -49,8 +49,12 @@ func (conn *Conn) CollectionList(ctx context.Context, opts arvados.ListOptions)
 }
 
 // CollectionCreate defers to railsProxy for everything except blob
-// signatures.
+// signatures and vocabulary checking.
 func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Collection, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Collection{}, err
+	}
 	if len(opts.Select) > 0 {
 		// We need to know IsTrashed and TrashAt to implement
 		// signing properly, even if the caller doesn't want
@@ -66,8 +70,12 @@ func (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptio
 }
 
 // CollectionUpdate defers to railsProxy for everything except blob
-// signatures.
+// signatures and vocabulary checking.
 func (conn *Conn) CollectionUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Collection, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Collection{}, err
+	}
 	if len(opts.Select) > 0 {
 		// We need to know IsTrashed and TrashAt to implement
 		// signing properly, even if the caller doesn't want
diff --git a/lib/controller/localdb/collection_test.go b/lib/controller/localdb/collection_test.go
index 4a4494964..bbfb81116 100644
--- a/lib/controller/localdb/collection_test.go
+++ b/lib/controller/localdb/collection_test.go
@@ -48,6 +48,93 @@ func (s *CollectionSuite) TearDownTest(c *check.C) {
 	s.railsSpy.Close()
 }
 
+func (s *CollectionSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+	if testVocabulary == "" {
+		testVocabulary = `{
+			"strict_tags": false,
+			"tags": {
+				"IDTAGIMPORTANCES": {
+					"strict": true,
+					"labels": [{"label": "Importance"}, {"label": "Priority"}],
+					"values": {
+						"IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+						"IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+						"IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+					}
+				}
+			}
+		}`
+	}
+	voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+	c.Assert(err, check.IsNil)
+	s.cluster.API.VocabularyPath = "foo"
+	s.localdb.vocabularyCache = voc
+}
+
+func (s *CollectionSuite) TestCollectionCreateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+
+		coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(coll.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
+
+func (s *CollectionSuite) TestCollectionUpdateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		coll, err := s.localdb.CollectionCreate(ctx, arvados.CreateOptions{})
+		c.Assert(err, check.IsNil)
+		coll, err = s.localdb.CollectionUpdate(ctx, arvados.UpdateOptions{
+			UUID:   coll.UUID,
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(coll.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
+
 func (s *CollectionSuite) TestSignatures(c *check.C) {
 	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
 
diff --git a/lib/controller/localdb/conn.go b/lib/controller/localdb/conn.go
index a90deded5..323e660c6 100644
--- a/lib/controller/localdb/conn.go
+++ b/lib/controller/localdb/conn.go
@@ -6,27 +6,37 @@ package localdb
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
+	"net/http"
+	"os"
 	"strings"
+	"time"
 
 	"git.arvados.org/arvados.git/lib/controller/railsproxy"
 	"git.arvados.org/arvados.git/lib/controller/rpc"
 	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	"git.arvados.org/arvados.git/sdk/go/httpserver"
+	"github.com/sirupsen/logrus"
 )
 
 type railsProxy = rpc.Conn
 
 type Conn struct {
-	cluster     *arvados.Cluster
-	*railsProxy // handles API methods that aren't defined on Conn itself
+	cluster                    *arvados.Cluster
+	*railsProxy                // handles API methods that aren't defined on Conn itself
+	vocabularyCache            *arvados.Vocabulary
+	vocabularyFileModTime      time.Time
+	lastVocabularyRefreshCheck time.Time
+	lastVocabularyError        error
 	loginController
 }
 
 func NewConn(cluster *arvados.Cluster) *Conn {
 	railsProxy := railsproxy.NewConn(cluster)
 	railsProxy.RedactHostInErrors = true
-	var conn Conn
-	conn = Conn{
+	conn := Conn{
 		cluster:    cluster,
 		railsProxy: railsProxy,
 	}
@@ -34,6 +44,106 @@ func NewConn(cluster *arvados.Cluster) *Conn {
 	return &conn
 }
 
+func (conn *Conn) checkProperties(ctx context.Context, properties interface{}) error {
+	if properties == nil {
+		return nil
+	}
+	var props map[string]interface{}
+	switch properties := properties.(type) {
+	case string:
+		err := json.Unmarshal([]byte(properties), &props)
+		if err != nil {
+			return err
+		}
+	case map[string]interface{}:
+		props = properties
+	default:
+		return fmt.Errorf("unexpected properties type %T", properties)
+	}
+	voc, err := conn.VocabularyGet(ctx)
+	if err != nil {
+		return err
+	}
+	err = voc.Check(props)
+	if err != nil {
+		return httpErrorf(http.StatusBadRequest, "%s", err.Error())
+	}
+	return nil
+}
+
+func (conn *Conn) maybeRefreshVocabularyCache(logger logrus.FieldLogger) error {
+	if conn.lastVocabularyRefreshCheck.Add(time.Second).After(time.Now()) {
+		// Throttle the access to disk to at most once per second.
+		return nil
+	}
+	conn.lastVocabularyRefreshCheck = time.Now()
+	fi, err := os.Stat(conn.cluster.API.VocabularyPath)
+	if err != nil {
+		err = fmt.Errorf("couldn't stat vocabulary file %q: %v", conn.cluster.API.VocabularyPath, err)
+		conn.lastVocabularyError = err
+		return err
+	}
+	if fi.ModTime().After(conn.vocabularyFileModTime) {
+		err = conn.loadVocabularyFile()
+		if err != nil {
+			conn.lastVocabularyError = err
+			return err
+		}
+		conn.vocabularyFileModTime = fi.ModTime()
+		conn.lastVocabularyError = nil
+		logger.Info("vocabulary file reloaded successfully")
+	}
+	return nil
+}
+
+func (conn *Conn) loadVocabularyFile() error {
+	vf, err := os.ReadFile(conn.cluster.API.VocabularyPath)
+	if err != nil {
+		return fmt.Errorf("couldn't read the vocabulary file: %v", err)
+	}
+	mk := make([]string, 0, len(conn.cluster.Collections.ManagedProperties))
+	for k := range conn.cluster.Collections.ManagedProperties {
+		mk = append(mk, k)
+	}
+	voc, err := arvados.NewVocabulary(vf, mk)
+	if err != nil {
+		return fmt.Errorf("while loading vocabulary file %q: %s", conn.cluster.API.VocabularyPath, err)
+	}
+	conn.vocabularyCache = voc
+	return nil
+}
+
+// LastVocabularyError returns the last error encountered while loading the
+// vocabulary file.
+// Implements health.Func
+func (conn *Conn) LastVocabularyError() error {
+	conn.maybeRefreshVocabularyCache(ctxlog.FromContext(context.Background()))
+	return conn.lastVocabularyError
+}
+
+// VocabularyGet refreshes the vocabulary cache if necessary and returns it.
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+	if conn.cluster.API.VocabularyPath == "" {
+		return arvados.Vocabulary{
+			Tags: map[string]arvados.VocabularyTag{},
+		}, nil
+	}
+	logger := ctxlog.FromContext(ctx)
+	if conn.vocabularyCache == nil {
+		// Initial load of vocabulary file.
+		err := conn.loadVocabularyFile()
+		if err != nil {
+			logger.WithError(err).Error("error loading vocabulary file")
+			return arvados.Vocabulary{}, err
+		}
+	}
+	err := conn.maybeRefreshVocabularyCache(logger)
+	if err != nil {
+		logger.WithError(err).Error("error reloading vocabulary file - ignoring")
+	}
+	return *conn.vocabularyCache, nil
+}
+
 // Logout handles the logout of conn giving to the appropriate loginController
 func (conn *Conn) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {
 	return conn.loginController.Logout(ctx, opts)
@@ -96,3 +206,7 @@ func (conn *Conn) GroupContents(ctx context.Context, options arvados.GroupConten
 
 	return conn.railsProxy.GroupContents(ctx, options)
 }
+
+func httpErrorf(code int, format string, args ...interface{}) error {
+	return httpserver.ErrorWithStatus(fmt.Errorf(format, args...), code)
+}
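
To make the checkProperties/VocabularyGet flow above concrete, here is a small standalone sketch (mirroring the vocabulary fixtures used in this commit's tests) that loads a definition and validates two property maps; the exact error wording is Arvados' own and not reproduced here:

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	def := []byte(`{
		"strict_tags": false,
		"tags": {
			"IDTAGIMPORTANCES": {
				"strict": true,
				"labels": [{"label": "Importance"}],
				"values": {
					"IDVALIMPORTANCES1": {"labels": [{"label": "Critical"}]}
				}
			}
		}
	}`)
	voc, err := arvados.NewVocabulary(def, []string{})
	if err != nil {
		panic(err)
	}
	// Valid key/value pair: expected to pass (nil error).
	fmt.Println(voc.Check(map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}))
	// Unknown value for a strict key: expected to be rejected, as in the tests.
	fmt.Println(voc.Check(map[string]interface{}{"IDTAGIMPORTANCES": "high"}))
}
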
diff --git a/lib/controller/localdb/container_request.go b/lib/controller/localdb/container_request.go
new file mode 100644
index 000000000..5b2ce95da
--- /dev/null
+++ b/lib/controller/localdb/container_request.go
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+	"context"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// ContainerRequestCreate defers to railsProxy for everything except
+// vocabulary checking.
+func (conn *Conn) ContainerRequestCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.ContainerRequest, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.ContainerRequest{}, err
+	}
+	resp, err := conn.railsProxy.ContainerRequestCreate(ctx, opts)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// ContainerRequestUpdate defers to railsProxy for everything except
+// vocabulary checking.
+func (conn *Conn) ContainerRequestUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.ContainerRequest, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.ContainerRequest{}, err
+	}
+	resp, err := conn.railsProxy.ContainerRequestUpdate(ctx, opts)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
diff --git a/lib/controller/localdb/container_request_test.go b/lib/controller/localdb/container_request_test.go
new file mode 100644
index 000000000..cca541a40
--- /dev/null
+++ b/lib/controller/localdb/container_request_test.go
@@ -0,0 +1,166 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+	"context"
+
+	"git.arvados.org/arvados.git/lib/config"
+	"git.arvados.org/arvados.git/lib/controller/rpc"
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"git.arvados.org/arvados.git/sdk/go/auth"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&ContainerRequestSuite{})
+
+type ContainerRequestSuite struct {
+	cluster  *arvados.Cluster
+	localdb  *Conn
+	railsSpy *arvadostest.Proxy
+}
+
+func (s *ContainerRequestSuite) TearDownSuite(c *check.C) {
+	// Undo any changes/additions to the user database so they
+	// don't affect subsequent tests.
+	arvadostest.ResetEnv()
+	c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *ContainerRequestSuite) SetUpTest(c *check.C) {
+	cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+	c.Assert(err, check.IsNil)
+	s.cluster, err = cfg.GetCluster("")
+	c.Assert(err, check.IsNil)
+	s.localdb = NewConn(s.cluster)
+	s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+	*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *ContainerRequestSuite) TearDownTest(c *check.C) {
+	s.railsSpy.Close()
+}
+
+func (s *ContainerRequestSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+	if testVocabulary == "" {
+		testVocabulary = `{
+			"strict_tags": false,
+			"tags": {
+				"IDTAGIMPORTANCES": {
+					"strict": true,
+					"labels": [{"label": "Importance"}, {"label": "Priority"}],
+					"values": {
+						"IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+						"IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+						"IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+					}
+				}
+			}
+		}`
+	}
+	voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+	c.Assert(err, check.IsNil)
+	s.localdb.vocabularyCache = voc
+	s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *ContainerRequestSuite) TestCRCreateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+
+		cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"command":         []string{"echo", "foo"},
+				"container_image": "arvados/apitestfixture:latest",
+				"cwd":             "/tmp",
+				"environment":     map[string]string{},
+				"mounts": map[string]interface{}{
+					"/out": map[string]interface{}{
+						"kind":     "tmp",
+						"capacity": 1000000,
+					},
+				},
+				"output_path": "/out",
+				"runtime_constraints": map[string]interface{}{
+					"vcpus": 1,
+					"ram":   2,
+				},
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(cnt.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
+
+func (s *ContainerRequestSuite) TestCRUpdateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		cnt, err := s.localdb.ContainerRequestCreate(ctx, arvados.CreateOptions{
+			Attrs: map[string]interface{}{
+				"command":         []string{"echo", "foo"},
+				"container_image": "arvados/apitestfixture:latest",
+				"cwd":             "/tmp",
+				"environment":     map[string]string{},
+				"mounts": map[string]interface{}{
+					"/out": map[string]interface{}{
+						"kind":     "tmp",
+						"capacity": 1000000,
+					},
+				},
+				"output_path": "/out",
+				"runtime_constraints": map[string]interface{}{
+					"vcpus": 1,
+					"ram":   2,
+				},
+			},
+		})
+		c.Assert(err, check.IsNil)
+		cnt, err = s.localdb.ContainerRequestUpdate(ctx, arvados.UpdateOptions{
+			UUID:   cnt.UUID,
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(cnt.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
diff --git a/lib/controller/localdb/group.go b/lib/controller/localdb/group.go
new file mode 100644
index 000000000..0d77bdbd9
--- /dev/null
+++ b/lib/controller/localdb/group.go
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+	"context"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// GroupCreate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) GroupCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Group, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Group{}, err
+	}
+	resp, err := conn.railsProxy.GroupCreate(ctx, opts)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// GroupUpdate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) GroupUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Group, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Group{}, err
+	}
+	resp, err := conn.railsProxy.GroupUpdate(ctx, opts)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
diff --git a/lib/controller/localdb/group_test.go b/lib/controller/localdb/group_test.go
new file mode 100644
index 000000000..2d55def9f
--- /dev/null
+++ b/lib/controller/localdb/group_test.go
@@ -0,0 +1,138 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+	"context"
+
+	"git.arvados.org/arvados.git/lib/config"
+	"git.arvados.org/arvados.git/lib/controller/rpc"
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"git.arvados.org/arvados.git/sdk/go/auth"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&GroupSuite{})
+
+type GroupSuite struct {
+	cluster  *arvados.Cluster
+	localdb  *Conn
+	railsSpy *arvadostest.Proxy
+}
+
+func (s *GroupSuite) TearDownSuite(c *check.C) {
+	// Undo any changes/additions to the user database so they
+	// don't affect subsequent tests.
+	arvadostest.ResetEnv()
+	c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *GroupSuite) SetUpTest(c *check.C) {
+	cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+	c.Assert(err, check.IsNil)
+	s.cluster, err = cfg.GetCluster("")
+	c.Assert(err, check.IsNil)
+	s.localdb = NewConn(s.cluster)
+	s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+	*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *GroupSuite) TearDownTest(c *check.C) {
+	s.railsSpy.Close()
+}
+
+func (s *GroupSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+	if testVocabulary == "" {
+		testVocabulary = `{
+			"strict_tags": false,
+			"tags": {
+				"IDTAGIMPORTANCES": {
+					"strict": true,
+					"labels": [{"label": "Importance"}, {"label": "Priority"}],
+					"values": {
+						"IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+						"IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+						"IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+					}
+				}
+			}
+		}`
+	}
+	voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+	c.Assert(err, check.IsNil)
+	s.localdb.vocabularyCache = voc
+	s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *GroupSuite) TestGroupCreateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+
+		grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"group_class": "project",
+				"properties":  tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(grp.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
+
+func (s *GroupSuite) TestGroupUpdateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		grp, err := s.localdb.GroupCreate(ctx, arvados.CreateOptions{
+			Attrs: map[string]interface{}{
+				"group_class": "project",
+			},
+		})
+		c.Assert(err, check.IsNil)
+		grp, err = s.localdb.GroupUpdate(ctx, arvados.UpdateOptions{
+			UUID:   grp.UUID,
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(grp.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
diff --git a/lib/controller/localdb/link.go b/lib/controller/localdb/link.go
new file mode 100644
index 000000000..cfcae3d38
--- /dev/null
+++ b/lib/controller/localdb/link.go
@@ -0,0 +1,39 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+	"context"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+)
+
+// LinkCreate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) LinkCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Link, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Link{}, err
+	}
+	resp, err := conn.railsProxy.LinkCreate(ctx, opts)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// LinkUpdate defers to railsProxy for everything except vocabulary
+// checking.
+func (conn *Conn) LinkUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Link, error) {
+	err := conn.checkProperties(ctx, opts.Attrs["properties"])
+	if err != nil {
+		return arvados.Link{}, err
+	}
+	resp, err := conn.railsProxy.LinkUpdate(ctx, opts)
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
diff --git a/lib/controller/localdb/link_test.go b/lib/controller/localdb/link_test.go
new file mode 100644
index 000000000..2f07fb459
--- /dev/null
+++ b/lib/controller/localdb/link_test.go
@@ -0,0 +1,142 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package localdb
+
+import (
+	"context"
+
+	"git.arvados.org/arvados.git/lib/config"
+	"git.arvados.org/arvados.git/lib/controller/rpc"
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"git.arvados.org/arvados.git/sdk/go/auth"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&LinkSuite{})
+
+type LinkSuite struct {
+	cluster  *arvados.Cluster
+	localdb  *Conn
+	railsSpy *arvadostest.Proxy
+}
+
+func (s *LinkSuite) TearDownSuite(c *check.C) {
+	// Undo any changes/additions to the user database so they
+	// don't affect subsequent tests.
+	arvadostest.ResetEnv()
+	c.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, "POST", "database/reset", nil, nil), check.IsNil)
+}
+
+func (s *LinkSuite) SetUpTest(c *check.C) {
+	cfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()
+	c.Assert(err, check.IsNil)
+	s.cluster, err = cfg.GetCluster("")
+	c.Assert(err, check.IsNil)
+	s.localdb = NewConn(s.cluster)
+	s.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)
+	*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)
+}
+
+func (s *LinkSuite) TearDownTest(c *check.C) {
+	s.railsSpy.Close()
+}
+
+func (s *LinkSuite) setUpVocabulary(c *check.C, testVocabulary string) {
+	if testVocabulary == "" {
+		testVocabulary = `{
+			"strict_tags": false,
+			"tags": {
+				"IDTAGIMPORTANCES": {
+					"strict": true,
+					"labels": [{"label": "Importance"}, {"label": "Priority"}],
+					"values": {
+						"IDVALIMPORTANCES1": { "labels": [{"label": "Critical"}, {"label": "Urgent"}, {"label": "High"}] },
+						"IDVALIMPORTANCES2": { "labels": [{"label": "Normal"}, {"label": "Moderate"}] },
+						"IDVALIMPORTANCES3": { "labels": [{"label": "Low"}] }
+					}
+				}
+			}
+		}`
+	}
+	voc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})
+	c.Assert(err, check.IsNil)
+	s.localdb.vocabularyCache = voc
+	s.cluster.API.VocabularyPath = "foo"
+}
+
+func (s *LinkSuite) TestLinkCreateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+
+		lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"link_class": "star",
+				"tail_uuid":  "zzzzz-j7d0g-publicfavorites",
+				"head_uuid":  arvadostest.FooCollection,
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(lnk.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
+
+func (s *LinkSuite) TestLinkUpdateWithProperties(c *check.C) {
+	s.setUpVocabulary(c, "")
+	ctx := auth.NewContext(context.Background(), &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})
+
+	tests := []struct {
+		name    string
+		props   map[string]interface{}
+		success bool
+	}{
+		{"Invalid prop key", map[string]interface{}{"Priority": "IDVALIMPORTANCES1"}, false},
+		{"Invalid prop value", map[string]interface{}{"IDTAGIMPORTANCES": "high"}, false},
+		{"Valid prop key & value", map[string]interface{}{"IDTAGIMPORTANCES": "IDVALIMPORTANCES1"}, true},
+		{"Empty properties", map[string]interface{}{}, true},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		lnk, err := s.localdb.LinkCreate(ctx, arvados.CreateOptions{
+			Attrs: map[string]interface{}{
+				"link_class": "star",
+				"tail_uuid":  "zzzzz-j7d0g-publicfavorites",
+				"head_uuid":  arvadostest.FooCollection,
+			},
+		})
+		c.Assert(err, check.IsNil)
+		lnk, err = s.localdb.LinkUpdate(ctx, arvados.UpdateOptions{
+			UUID:   lnk.UUID,
+			Select: []string{"uuid", "properties"},
+			Attrs: map[string]interface{}{
+				"properties": tt.props,
+			}})
+		if tt.success {
+			c.Assert(err, check.IsNil)
+			c.Assert(lnk.Properties, check.DeepEquals, tt.props)
+		} else {
+			c.Assert(err, check.NotNil)
+		}
+	}
+}
diff --git a/lib/controller/router/router.go b/lib/controller/router/router.go
index 9826c1e74..02e06279f 100644
--- a/lib/controller/router/router.go
+++ b/lib/controller/router/router.go
@@ -65,6 +65,13 @@ func (rtr *router) addRoutes() {
 				return rtr.backend.ConfigGet(ctx)
 			},
 		},
+		{
+			arvados.EndpointVocabularyGet,
+			func() interface{} { return &struct{}{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.backend.VocabularyGet(ctx)
+			},
+		},
 		{
 			arvados.EndpointLogin,
 			func() interface{} { return &arvados.LoginOptions{} },
@@ -307,6 +314,41 @@ func (rtr *router) addRoutes() {
 				return rtr.backend.GroupUntrash(ctx, *opts.(*arvados.UntrashOptions))
 			},
 		},
+		{
+			arvados.EndpointLinkCreate,
+			func() interface{} { return &arvados.CreateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.backend.LinkCreate(ctx, *opts.(*arvados.CreateOptions))
+			},
+		},
+		{
+			arvados.EndpointLinkUpdate,
+			func() interface{} { return &arvados.UpdateOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.backend.LinkUpdate(ctx, *opts.(*arvados.UpdateOptions))
+			},
+		},
+		{
+			arvados.EndpointLinkList,
+			func() interface{} { return &arvados.ListOptions{Limit: -1} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.backend.LinkList(ctx, *opts.(*arvados.ListOptions))
+			},
+		},
+		{
+			arvados.EndpointLinkGet,
+			func() interface{} { return &arvados.GetOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.backend.LinkGet(ctx, *opts.(*arvados.GetOptions))
+			},
+		},
+		{
+			arvados.EndpointLinkDelete,
+			func() interface{} { return &arvados.DeleteOptions{} },
+			func(ctx context.Context, opts interface{}) (interface{}, error) {
+				return rtr.backend.LinkDelete(ctx, *opts.(*arvados.DeleteOptions))
+			},
+		},
 		{
 			arvados.EndpointSpecimenCreate,
 			func() interface{} { return &arvados.CreateOptions{} },
diff --git a/lib/controller/rpc/conn.go b/lib/controller/rpc/conn.go
index 640bbf1c2..25f47bc3b 100644
--- a/lib/controller/rpc/conn.go
+++ b/lib/controller/rpc/conn.go
@@ -178,6 +178,13 @@ func (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {
 	return resp, err
 }
 
+func (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+	ep := arvados.EndpointVocabularyGet
+	var resp arvados.Vocabulary
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, nil)
+	return resp, err
+}
+
 func (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
 	ep := arvados.EndpointLogin
 	var resp arvados.LoginResponse
@@ -495,6 +502,41 @@ func (conn *Conn) GroupUntrash(ctx context.Context, options arvados.UntrashOptio
 	return resp, err
 }
 
+func (conn *Conn) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkCreate
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+func (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkUpdate
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+func (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkGet
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+func (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+	ep := arvados.EndpointLinkList
+	var resp arvados.LinkList
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
+func (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+	ep := arvados.EndpointLinkDelete
+	var resp arvados.Link
+	err := conn.requestAndDecode(ctx, &resp, ep, nil, options)
+	return resp, err
+}
+
 func (conn *Conn) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
 	ep := arvados.EndpointSpecimenCreate
 	var resp arvados.Specimen
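
For orientation, here is a minimal sketch of how a Go client might call the new Conn methods above. The import paths, host name, token, and UUIDs are placeholders/assumptions rather than part of this change; CreateOptions.Attrs carries the arvados#link attributes.

package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"git.arvados.org/arvados.git/lib/controller/rpc"
	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Assumption: a valid API token for the target cluster (placeholder below).
	token := "v2/xxxxx-gj3su-xxxxxxxxxxxxxxx/apitokensecret"

	// NewConn takes a token provider; a trivial one returning a fixed token is enough here.
	conn := rpc.NewConn("xxxxx",
		&url.URL{Scheme: "https", Host: "xxxxx.example.com"},
		false, // verify TLS certificates
		func(ctx context.Context) ([]string, error) { return []string{token}, nil })

	ctx := context.Background()

	// Create a permission link; attribute names follow the arvados#link schema.
	link, err := conn.LinkCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{
		"link_class": "permission",
		"name":       "can_read",
		"head_uuid":  "xxxxx-4zz18-xxxxxxxxxxxxxxx",
		"tail_uuid":  "xxxxx-tpzed-xxxxxxxxxxxxxxx",
	}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created link", link.UUID)

	// List links, mirroring the ListOptions{Limit: -1} default used by the router above.
	links, err := conn.LinkList(ctx, arvados.ListOptions{Limit: -1})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(links.Items), "links visible to this token")
}
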
diff --git a/sdk/go/arvados/api.go b/sdk/go/arvados/api.go
index b429e8008..0fdc13d19 100644
--- a/sdk/go/arvados/api.go
+++ b/sdk/go/arvados/api.go
@@ -23,6 +23,7 @@ type APIEndpoint struct {
 
 var (
 	EndpointConfigGet                     = APIEndpoint{"GET", "arvados/v1/config", ""}
+	EndpointVocabularyGet                 = APIEndpoint{"GET", "arvados/v1/vocabulary", ""}
 	EndpointLogin                         = APIEndpoint{"GET", "login", ""}
 	EndpointLogout                        = APIEndpoint{"GET", "logout", ""}
 	EndpointCollectionCreate              = APIEndpoint{"POST", "arvados/v1/collections", "collection"}
@@ -62,6 +63,11 @@ var (
 	EndpointGroupDelete                   = APIEndpoint{"DELETE", "arvados/v1/groups/{uuid}", ""}
 	EndpointGroupTrash                    = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/trash", ""}
 	EndpointGroupUntrash                  = APIEndpoint{"POST", "arvados/v1/groups/{uuid}/untrash", ""}
+	EndpointLinkCreate                    = APIEndpoint{"POST", "arvados/v1/links", "link"}
+	EndpointLinkUpdate                    = APIEndpoint{"PATCH", "arvados/v1/links/{uuid}", "link"}
+	EndpointLinkGet                       = APIEndpoint{"GET", "arvados/v1/links/{uuid}", ""}
+	EndpointLinkList                      = APIEndpoint{"GET", "arvados/v1/links", ""}
+	EndpointLinkDelete                    = APIEndpoint{"DELETE", "arvados/v1/links/{uuid}", ""}
 	EndpointUserActivate                  = APIEndpoint{"POST", "arvados/v1/users/{uuid}/activate", ""}
 	EndpointUserCreate                    = APIEndpoint{"POST", "arvados/v1/users", "user"}
 	EndpointUserCurrent                   = APIEndpoint{"GET", "arvados/v1/users/current", ""}
@@ -219,6 +225,7 @@ type BlockWriteResponse struct {
 
 type API interface {
 	ConfigGet(ctx context.Context) (json.RawMessage, error)
+	VocabularyGet(ctx context.Context) (Vocabulary, error)
 	Login(ctx context.Context, options LoginOptions) (LoginResponse, error)
 	Logout(ctx context.Context, options LogoutOptions) (LogoutResponse, error)
 	CollectionCreate(ctx context.Context, options CreateOptions) (Collection, error)
@@ -252,6 +259,11 @@ type API interface {
 	GroupDelete(ctx context.Context, options DeleteOptions) (Group, error)
 	GroupTrash(ctx context.Context, options DeleteOptions) (Group, error)
 	GroupUntrash(ctx context.Context, options UntrashOptions) (Group, error)
+	LinkCreate(ctx context.Context, options CreateOptions) (Link, error)
+	LinkUpdate(ctx context.Context, options UpdateOptions) (Link, error)
+	LinkGet(ctx context.Context, options GetOptions) (Link, error)
+	LinkList(ctx context.Context, options ListOptions) (LinkList, error)
+	LinkDelete(ctx context.Context, options DeleteOptions) (Link, error)
 	SpecimenCreate(ctx context.Context, options CreateOptions) (Specimen, error)
 	SpecimenUpdate(ctx context.Context, options UpdateOptions) (Specimen, error)
 	SpecimenGet(ctx context.Context, options GetOptions) (Specimen, error)
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index f1d27b8dc..1cd002082 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -77,6 +77,12 @@ type UploadDownloadRolePermissions struct {
 	Admin UploadDownloadPermission
 }
 
+type ManagedProperties map[string]struct {
+	Value     interface{}
+	Function  string
+	Protected bool
+}
+
 type Cluster struct {
 	ClusterID       string `json:"-"`
 	ManagementToken string
@@ -102,6 +108,7 @@ type Cluster struct {
 		WebsocketClientEventQueue      int
 		WebsocketServerEventQueue      int
 		KeepServiceRequestTimeout      Duration
+		VocabularyPath                 string
 	}
 	AuditLogs struct {
 		MaxAge             Duration
@@ -109,23 +116,19 @@ type Cluster struct {
 		UnloggedAttributes StringSet
 	}
 	Collections struct {
-		BlobSigning              bool
-		BlobSigningKey           string
-		BlobSigningTTL           Duration
-		BlobTrash                bool
-		BlobTrashLifetime        Duration
-		BlobTrashCheckInterval   Duration
-		BlobTrashConcurrency     int
-		BlobDeleteConcurrency    int
-		BlobReplicateConcurrency int
-		CollectionVersioning     bool
-		DefaultTrashLifetime     Duration
-		DefaultReplication       int
-		ManagedProperties        map[string]struct {
-			Value     interface{}
-			Function  string
-			Protected bool
-		}
+		BlobSigning                  bool
+		BlobSigningKey               string
+		BlobSigningTTL               Duration
+		BlobTrash                    bool
+		BlobTrashLifetime            Duration
+		BlobTrashCheckInterval       Duration
+		BlobTrashConcurrency         int
+		BlobDeleteConcurrency        int
+		BlobReplicateConcurrency     int
+		CollectionVersioning         bool
+		DefaultTrashLifetime         Duration
+		DefaultReplication           int
+		ManagedProperties            ManagedProperties
 		PreserveVersionIfIdle        Duration
 		TrashSweepInterval           Duration
 		TrustAllContent              bool
@@ -273,7 +276,6 @@ type Cluster struct {
 			Options              map[string]struct{}
 		}
 		UserProfileFormMessage string
-		VocabularyURL          string
 		WelcomePageHTML        string
 		InactivePageHTML       string
 		SSHHelpPageHTML        string
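
As a rough illustration of the newly named type: the property key and function value below are only examples of the kind of entry a cluster configuration might carry, not something taken from this patch.

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Sketch only: constructing ManagedProperties directly, as config loading would
	// after unmarshaling Collections.ManagedProperties from the cluster config.
	mp := arvados.ManagedProperties{
		"responsible_person_uuid": {Function: "original_owner", Protected: true}, // illustrative entry
	}
	for key, prop := range mp {
		fmt.Printf("%s: value=%v function=%q protected=%v\n", key, prop.Value, prop.Function, prop.Protected)
	}
}
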
diff --git a/sdk/go/arvados/link.go b/sdk/go/arvados/link.go
index f7d1f35a3..7df6b84d6 100644
--- a/sdk/go/arvados/link.go
+++ b/sdk/go/arvados/link.go
@@ -4,17 +4,25 @@
 
 package arvados
 
+import "time"
+
 // Link is an arvados#link record
 type Link struct {
-	UUID       string                 `json:"uuid,omiempty"`
-	OwnerUUID  string                 `json:"owner_uuid"`
-	Name       string                 `json:"name"`
-	LinkClass  string                 `json:"link_class"`
-	HeadUUID   string                 `json:"head_uuid"`
-	HeadKind   string                 `json:"head_kind"`
-	TailUUID   string                 `json:"tail_uuid"`
-	TailKind   string                 `json:"tail_kind"`
-	Properties map[string]interface{} `json:"properties"`
+	UUID                 string                 `json:"uuid,omitempty"`
+	Etag                 string                 `json:"etag"`
+	Href                 string                 `json:"href"`
+	OwnerUUID            string                 `json:"owner_uuid"`
+	Name                 string                 `json:"name"`
+	LinkClass            string                 `json:"link_class"`
+	CreatedAt            time.Time              `json:"created_at"`
+	ModifiedAt           time.Time              `json:"modified_at"`
+	ModifiedByClientUUID string                 `json:"modified_by_client_uuid"`
+	ModifiedByUserUUID   string                 `json:"modified_by_user_uuid"`
+	HeadUUID             string                 `json:"head_uuid"`
+	HeadKind             string                 `json:"head_kind"`
+	TailUUID             string                 `json:"tail_uuid"`
+	TailKind             string                 `json:"tail_kind"`
+	Properties           map[string]interface{} `json:"properties"`
 }
 
 // LinkList is an arvados#linkList resource.
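
A small sketch of what the expanded struct serializes to (placeholder values; note that the old tag misspelled "omitempty", so that option only takes effect with this fix).

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Placeholder values; this just shows the JSON field names declared above.
	l := arvados.Link{
		LinkClass:  "permission",
		Name:       "can_read",
		HeadUUID:   "xxxxx-4zz18-xxxxxxxxxxxxxxx",
		TailUUID:   "xxxxx-tpzed-xxxxxxxxxxxxxxx",
		CreatedAt:  time.Now(),
		Properties: map[string]interface{}{"comment": "example"},
	}
	buf, err := json.MarshalIndent(l, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
}
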
diff --git a/sdk/go/arvados/vocabulary.go b/sdk/go/arvados/vocabulary.go
new file mode 100644
index 000000000..150091b30
--- /dev/null
+++ b/sdk/go/arvados/vocabulary.go
@@ -0,0 +1,220 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+type Vocabulary struct {
+	reservedTagKeys map[string]bool          `json:"-"`
+	StrictTags      bool                     `json:"strict_tags"`
+	Tags            map[string]VocabularyTag `json:"tags"`
+}
+
+type VocabularyTag struct {
+	Strict bool                          `json:"strict"`
+	Labels []VocabularyLabel             `json:"labels"`
+	Values map[string]VocabularyTagValue `json:"values"`
+}
+
+// Cannot have a constant map in Go, so we have to use a function
+func (v *Vocabulary) systemTagKeys() map[string]bool {
+	return map[string]bool{
+		"type":                  true,
+		"template_uuid":         true,
+		"groups":                true,
+		"username":              true,
+		"image_timestamp":       true,
+		"docker-image-repo-tag": true,
+		"filters":               true,
+		"container_request":     true,
+	}
+}
+
+type VocabularyLabel struct {
+	Label string `json:"label"`
+}
+
+type VocabularyTagValue struct {
+	Labels []VocabularyLabel `json:"labels"`
+}
+
+// NewVocabulary creates a new Vocabulary from a JSON definition and a list
+// of reserved tag keys that will get special treatment when strict mode is
+// enabled.
+func NewVocabulary(data []byte, managedTagKeys []string) (voc *Vocabulary, err error) {
+	if r := bytes.Compare(data, []byte("")); r == 0 {
+		return &Vocabulary{}, nil
+	}
+	err = json.Unmarshal(data, &voc)
+	if err != nil {
+		return nil, fmt.Errorf("invalid JSON format error: %q", err)
+	}
+	if reflect.DeepEqual(voc, &Vocabulary{}) {
+		return nil, fmt.Errorf("JSON data provided doesn't match Vocabulary format: %q", data)
+	}
+	voc.reservedTagKeys = make(map[string]bool)
+	for _, managedKey := range managedTagKeys {
+		voc.reservedTagKeys[managedKey] = true
+	}
+	for systemKey := range voc.systemTagKeys() {
+		voc.reservedTagKeys[systemKey] = true
+	}
+	err = voc.validate()
+	if err != nil {
+		return nil, err
+	}
+	return voc, nil
+}
+
+func (v *Vocabulary) validate() error {
+	if v == nil {
+		return nil
+	}
+	tagKeys := map[string]string{}
+	// Checks for Vocabulary strictness
+	if v.StrictTags && len(v.Tags) == 0 {
+		return fmt.Errorf("vocabulary is strict but no tags are defined")
+	}
+	// Checks for collisions between tag keys, reserved tag keys
+	// and tag key labels.
+	for key := range v.Tags {
+		if v.reservedTagKeys[key] {
+			return fmt.Errorf("tag key %q is reserved", key)
+		}
+		lcKey := strings.ToLower(key)
+		if tagKeys[lcKey] != "" {
+			return fmt.Errorf("duplicate tag key %q", key)
+		}
+		tagKeys[lcKey] = key
+		for _, lbl := range v.Tags[key].Labels {
+			label := strings.ToLower(lbl.Label)
+			if tagKeys[label] != "" {
+				return fmt.Errorf("tag label %q for key %q already seen as a tag key or label", lbl.Label, key)
+			}
+			tagKeys[label] = lbl.Label
+		}
+		// Checks for value strictness
+		if v.Tags[key].Strict && len(v.Tags[key].Values) == 0 {
+			return fmt.Errorf("tag key %q is configured as strict but doesn't provide values", key)
+		}
+		// Checks for collisions between tag values and tag value labels.
+		tagValues := map[string]string{}
+		for val := range v.Tags[key].Values {
+			lcVal := strings.ToLower(val)
+			if tagValues[lcVal] != "" {
+				return fmt.Errorf("duplicate tag value %q for tag %q", val, key)
+			}
+			// Checks for collisions between labels from different values.
+			tagValues[lcVal] = val
+			for _, tagLbl := range v.Tags[key].Values[val].Labels {
+				label := strings.ToLower(tagLbl.Label)
+				if tagValues[label] != "" && tagValues[label] != val {
+					return fmt.Errorf("tag value label %q for pair (%q:%q) already seen on value %q", tagLbl.Label, key, val, tagValues[label])
+				}
+				tagValues[label] = val
+			}
+		}
+	}
+	return nil
+}
+
+func (v *Vocabulary) getLabelsToKeys() (labels map[string]string) {
+	if v == nil {
+		return
+	}
+	labels = make(map[string]string)
+	for key, val := range v.Tags {
+		for _, lbl := range val.Labels {
+			label := strings.ToLower(lbl.Label)
+			labels[label] = key
+		}
+	}
+	return labels
+}
+
+func (v *Vocabulary) getLabelsToValues(key string) (labels map[string]string) {
+	if v == nil {
+		return
+	}
+	labels = make(map[string]string)
+	if _, ok := v.Tags[key]; ok {
+		for val := range v.Tags[key].Values {
+			labels[strings.ToLower(val)] = val
+			for _, tagLbl := range v.Tags[key].Values[val].Labels {
+				label := strings.ToLower(tagLbl.Label)
+				labels[label] = val
+			}
+		}
+	}
+	return labels
+}
+
+func (v *Vocabulary) checkValue(key, val string) error {
+	if _, ok := v.Tags[key].Values[val]; !ok {
+		lcVal := strings.ToLower(val)
+		correctValue, ok := v.getLabelsToValues(key)[lcVal]
+		if ok {
+			return fmt.Errorf("tag value %q for key %q is an alias, must be provided as %q", val, key, correctValue)
+		} else if v.Tags[key].Strict {
+			return fmt.Errorf("tag value %q is not valid for key %q", val, key)
+		}
+	}
+	return nil
+}
+
+// Check validates the given data against the vocabulary.
+func (v *Vocabulary) Check(data map[string]interface{}) error {
+	if v == nil {
+		return nil
+	}
+	for key, val := range data {
+		// Checks for key validity
+		if v.reservedTagKeys[key] {
+			// Allow reserved keys to be used even if they are not defined in
+			// the vocabulary no matter its strictness.
+			continue
+		}
+		if _, ok := v.Tags[key]; !ok {
+			lcKey := strings.ToLower(key)
+			correctKey, ok := v.getLabelsToKeys()[lcKey]
+			if ok {
+				return fmt.Errorf("tag key %q is an alias, must be provided as %q", key, correctKey)
+			} else if v.StrictTags {
+				return fmt.Errorf("tag key %q is not defined in the vocabulary", key)
+			}
+			// If the key is not defined, we don't need to check the value
+			continue
+		}
+		// Checks for value validity -- key is defined
+		switch val := val.(type) {
+		case string:
+			err := v.checkValue(key, val)
+			if err != nil {
+				return err
+			}
+		case []interface{}:
+			for _, singleVal := range val {
+				switch singleVal := singleVal.(type) {
+				case string:
+					err := v.checkValue(key, singleVal)
+					if err != nil {
+						return err
+					}
+				default:
+					return fmt.Errorf("value list element type for tag key %q was %T, but expected a string", key, singleVal)
+				}
+			}
+		default:
+			return fmt.Errorf("value type for tag key %q was %T, but expected a string or list of strings", key, val)
+		}
+	}
+	return nil
+}
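
A minimal usage sketch for the new type; the tag and value identifiers are placeholders in the same style as the tests that follow.

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// A tiny vocabulary definition in the JSON format unmarshaled by NewVocabulary.
	def := []byte(`{
		"strict_tags": false,
		"tags": {
			"IDTAGANIMALS": {
				"strict": true,
				"labels": [{"label": "Animal"}],
				"values": {
					"IDVALANIMAL1": {"labels": [{"label": "Human"}]}
				}
			}
		}
	}`)

	voc, err := arvados.NewVocabulary(def, nil)
	if err != nil {
		panic(err)
	}

	// Valid: a known key with one of its listed values.
	fmt.Println(voc.Check(map[string]interface{}{"IDTAGANIMALS": "IDVALANIMAL1"}))
	// Error: "Human" is a label (alias), so Check asks for the canonical value instead.
	fmt.Println(voc.Check(map[string]interface{}{"IDTAGANIMALS": "Human"}))
}
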
diff --git a/sdk/go/arvados/vocabulary_test.go b/sdk/go/arvados/vocabulary_test.go
new file mode 100644
index 000000000..5a5189de2
--- /dev/null
+++ b/sdk/go/arvados/vocabulary_test.go
@@ -0,0 +1,457 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package arvados
+
+import (
+	"encoding/json"
+
+	check "gopkg.in/check.v1"
+)
+
+type VocabularySuite struct {
+	testVoc *Vocabulary
+}
+
+var _ = check.Suite(&VocabularySuite{})
+
+func (s *VocabularySuite) SetUpTest(c *check.C) {
+	s.testVoc = &Vocabulary{
+		reservedTagKeys: map[string]bool{
+			"reservedKey": true,
+		},
+		StrictTags: false,
+		Tags: map[string]VocabularyTag{
+			"IDTAGANIMALS": {
+				Strict: false,
+				Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+				Values: map[string]VocabularyTagValue{
+					"IDVALANIMAL1": {
+						Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Homo sapiens"}},
+					},
+					"IDVALANIMAL2": {
+						Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Loxodonta"}},
+					},
+				},
+			},
+			"IDTAGIMPORTANCE": {
+				Strict: true,
+				Labels: []VocabularyLabel{{Label: "Importance"}, {Label: "Priority"}},
+				Values: map[string]VocabularyTagValue{
+					"IDVAL3": {
+						Labels: []VocabularyLabel{{Label: "Low"}, {Label: "Low priority"}},
+					},
+					"IDVAL2": {
+						Labels: []VocabularyLabel{{Label: "Medium"}, {Label: "Medium priority"}},
+					},
+					"IDVAL1": {
+						Labels: []VocabularyLabel{{Label: "High"}, {Label: "High priority"}},
+					},
+				},
+			},
+			"IDTAGCOMMENT": {
+				Strict: false,
+				Labels: []VocabularyLabel{{Label: "Comment"}},
+			},
+		},
+	}
+	err := s.testVoc.validate()
+	c.Assert(err, check.IsNil)
+}
+
+func (s *VocabularySuite) TestCheck(c *check.C) {
+	tests := []struct {
+		name          string
+		strictVoc     bool
+		props         string
+		expectSuccess bool
+		errMatches    string
+	}{
+		// Check succeeds
+		{
+			"Known key, known value",
+			false,
+			`{"IDTAGANIMALS":"IDVALANIMAL1"}`,
+			true,
+			"",
+		},
+		{
+			"Unknown non-alias key on non-strict vocabulary",
+			false,
+			`{"foo":"bar"}`,
+			true,
+			"",
+		},
+		{
+			"Known non-strict key, unknown non-alias value",
+			false,
+			`{"IDTAGANIMALS":"IDVALANIMAL3"}`,
+			true,
+			"",
+		},
+		{
+			"Undefined but reserved key on strict vocabulary",
+			true,
+			`{"reservedKey":"bar"}`,
+			true,
+			"",
+		},
+		{
+			"Known key, list of known values",
+			false,
+			`{"IDTAGANIMALS":["IDVALANIMAL1","IDVALANIMAL2"]}`,
+			true,
+			"",
+		},
+		{
+			"Known non-strict key, list of unknown non-alias values",
+			false,
+			`{"IDTAGCOMMENT":["hello world","lorem ipsum"]}`,
+			true,
+			"",
+		},
+		// Check fails
+		{
+			"Known first key & value; known 2nd key, unknown 2nd value",
+			false,
+			`{"IDTAGANIMALS":"IDVALANIMAL1", "IDTAGIMPORTANCE": "blah blah"}`,
+			false,
+			"tag value.*is not valid for key.*",
+		},
+		{
+			"Unknown non-alias key on strict vocabulary",
+			true,
+			`{"foo":"bar"}`,
+			false,
+			"tag key.*is not defined in the vocabulary",
+		},
+		{
+			"Known non-strict key, known value alias",
+			false,
+			`{"IDTAGANIMALS":"Loxodonta"}`,
+			false,
+			"tag value.*for key.* is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, unknown non-alias value",
+			false,
+			`{"IDTAGIMPORTANCE":"Unimportant"}`,
+			false,
+			"tag value.*is not valid for key.*",
+		},
+		{
+			"Known strict key, lowercase value regarded as alias",
+			false,
+			`{"IDTAGIMPORTANCE":"idval1"}`,
+			false,
+			"tag value.*for key.* is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, known value alias",
+			false,
+			`{"IDTAGIMPORTANCE":"High"}`,
+			false,
+			"tag value.* for key.*is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, list of known alias values",
+			false,
+			`{"IDTAGIMPORTANCE":["High", "Low"]}`,
+			false,
+			"tag value.*for key.*is an alias, must be provided as.*",
+		},
+		{
+			"Known strict key, list of unknown non-alias values",
+			false,
+			`{"IDTAGIMPORTANCE":["foo","bar"]}`,
+			false,
+			"tag value.*is not valid for key.*",
+		},
+		{
+			"Invalid value type",
+			false,
+			`{"IDTAGANIMALS":1}`,
+			false,
+			"value type for tag key.* was.*, but expected a string or list of strings",
+		},
+		{
+			"Value list of invalid type",
+			false,
+			`{"IDTAGANIMALS":[1]}`,
+			false,
+			"value list element type for tag key.* was.*, but expected a string",
+		},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		s.testVoc.StrictTags = tt.strictVoc
+
+		var data map[string]interface{}
+		err := json.Unmarshal([]byte(tt.props), &data)
+		c.Assert(err, check.IsNil)
+		err = s.testVoc.Check(data)
+		if tt.expectSuccess {
+			c.Assert(err, check.IsNil)
+		} else {
+			c.Assert(err, check.NotNil)
+			c.Assert(err.Error(), check.Matches, tt.errMatches)
+		}
+	}
+}
+
+func (s *VocabularySuite) TestNewVocabulary(c *check.C) {
+	tests := []struct {
+		name       string
+		data       string
+		isValid    bool
+		errMatches string
+		expect     *Vocabulary
+	}{
+		{"Empty data", "", true, "", &Vocabulary{}},
+		{"Invalid JSON", "foo", false, "invalid JSON format.*", nil},
+		{"Valid, empty JSON", "{}", false, ".*doesn't match Vocabulary format.*", nil},
+		{"Valid JSON, wrong data", `{"foo":"bar"}`, false, ".*doesn't match Vocabulary format.*", nil},
+		{
+			"Simple valid example",
+			`{"tags":{
+				"IDTAGANIMALS":{
+					"strict": false,
+					"labels": [{"label": "Animal"}, {"label": "Creature"}],
+					"values": {
+						"IDVALANIMAL1":{"labels":[{"label":"Human"}, {"label":"Homo sapiens"}]},
+						"IDVALANIMAL2":{"labels":[{"label":"Elephant"}, {"label":"Loxodonta"}]},
+						"DOG":{"labels":[{"label":"Dog"}, {"label":"Canis lupus familiaris"}, {"label":"dOg"}]}
+					}
+				}
+			}}`,
+			true, "",
+			&Vocabulary{
+				reservedTagKeys: map[string]bool{
+					"type":                  true,
+					"template_uuid":         true,
+					"groups":                true,
+					"username":              true,
+					"image_timestamp":       true,
+					"docker-image-repo-tag": true,
+					"filters":               true,
+					"container_request":     true,
+				},
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+						Values: map[string]VocabularyTagValue{
+							"IDVALANIMAL1": {
+								Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Homo sapiens"}},
+							},
+							"IDVALANIMAL2": {
+								Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Loxodonta"}},
+							},
+							"DOG": {
+								Labels: []VocabularyLabel{{Label: "Dog"}, {Label: "Canis lupus familiaris"}, {Label: "dOg"}},
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			"Valid data, but uses reserved key",
+			`{"tags":{
+				"type":{
+					"strict": false,
+					"labels": [{"label": "Type"}]
+				}
+			}}`,
+			false, "tag key.*is reserved", nil,
+		},
+	}
+
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		voc, err := NewVocabulary([]byte(tt.data), []string{})
+		if tt.isValid {
+			c.Assert(err, check.IsNil)
+		} else {
+			c.Assert(err, check.NotNil)
+			if tt.errMatches != "" {
+				c.Assert(err, check.ErrorMatches, tt.errMatches)
+			}
+		}
+		c.Assert(voc, check.DeepEquals, tt.expect)
+	}
+}
+
+func (s *VocabularySuite) TestValidationErrors(c *check.C) {
+	tests := []struct {
+		name       string
+		voc        *Vocabulary
+		errMatches string
+	}{
+		{
+			"Strict vocabulary, no keys",
+			&Vocabulary{
+				StrictTags: true,
+			},
+			"vocabulary is strict but no tags are defined",
+		},
+		{
+			"Collision between tag key and tag key label",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+					},
+					"IDTAGCOMMENT": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "IDTAGANIMALS"}},
+					},
+				},
+			},
+			"", // Depending on how the map is sorted, this could be one of two errors
+		},
+		{
+			"Collision between tag key and tag key label (case-insensitive)",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+					},
+					"IDTAGCOMMENT": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "IdTagAnimals"}},
+					},
+				},
+			},
+			"", // Depending on how the map is sorted, this could be one of two errors
+		},
+		{
+			"Collision between tag key labels",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+					},
+					"IDTAGCOMMENT": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Comment"}, {Label: "Animal"}},
+					},
+				},
+			},
+			"tag label.*for key.*already seen.*",
+		},
+		{
+			"Collision between tag value and tag value label",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+						Values: map[string]VocabularyTagValue{
+							"IDVALANIMAL1": {
+								Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+							},
+							"IDVALANIMAL2": {
+								Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "IDVALANIMAL1"}},
+							},
+						},
+					},
+				},
+			},
+			"", // Depending on how the map is sorted, this could be one of two errors
+		},
+		{
+			"Collision between tag value and tag value label (case-insensitive)",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+						Values: map[string]VocabularyTagValue{
+							"IDVALANIMAL1": {
+								Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+							},
+							"IDVALANIMAL2": {
+								Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "IDValAnimal1"}},
+							},
+						},
+					},
+				},
+			},
+			"", // Depending on how the map is sorted, this could be one of two errors
+		},
+		{
+			"Collision between tag value labels",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+						Values: map[string]VocabularyTagValue{
+							"IDVALANIMAL1": {
+								Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+							},
+							"IDVALANIMAL2": {
+								Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "Mammal"}},
+							},
+						},
+					},
+				},
+			},
+			"tag value label.*for pair.*already seen.*on value.*",
+		},
+		{
+			"Collision between tag value labels (case-insensitive)",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: false,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+						Values: map[string]VocabularyTagValue{
+							"IDVALANIMAL1": {
+								Labels: []VocabularyLabel{{Label: "Human"}, {Label: "Mammal"}},
+							},
+							"IDVALANIMAL2": {
+								Labels: []VocabularyLabel{{Label: "Elephant"}, {Label: "mAMMAL"}},
+							},
+						},
+					},
+				},
+			},
+			"tag value label.*for pair.*already seen.*on value.*",
+		},
+		{
+			"Strict tag key, with no values",
+			&Vocabulary{
+				StrictTags: false,
+				Tags: map[string]VocabularyTag{
+					"IDTAGANIMALS": {
+						Strict: true,
+						Labels: []VocabularyLabel{{Label: "Animal"}, {Label: "Creature"}},
+					},
+				},
+			},
+			"tag key.*is configured as strict but doesn't provide values",
+		},
+	}
+	for _, tt := range tests {
+		c.Log(c.TestName()+" ", tt.name)
+		err := tt.voc.validate()
+		c.Assert(err, check.NotNil)
+		if tt.errMatches != "" {
+			c.Assert(err, check.ErrorMatches, tt.errMatches)
+		}
+	}
+}
diff --git a/sdk/go/arvadostest/api.go b/sdk/go/arvadostest/api.go
index 8bf01693c..0af477125 100644
--- a/sdk/go/arvadostest/api.go
+++ b/sdk/go/arvadostest/api.go
@@ -33,6 +33,10 @@ func (as *APIStub) ConfigGet(ctx context.Context) (json.RawMessage, error) {
 	as.appendCall(ctx, as.ConfigGet, nil)
 	return nil, as.Error
 }
+func (as *APIStub) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {
+	as.appendCall(ctx, as.VocabularyGet, nil)
+	return arvados.Vocabulary{}, as.Error
+}
 func (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {
 	as.appendCall(ctx, as.Login, options)
 	return arvados.LoginResponse{}, as.Error
@@ -165,6 +169,26 @@ func (as *APIStub) GroupUntrash(ctx context.Context, options arvados.UntrashOpti
 	as.appendCall(ctx, as.GroupUntrash, options)
 	return arvados.Group{}, as.Error
 }
+func (as *APIStub) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {
+	as.appendCall(ctx, as.LinkCreate, options)
+	return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {
+	as.appendCall(ctx, as.LinkUpdate, options)
+	return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {
+	as.appendCall(ctx, as.LinkGet, options)
+	return arvados.Link{}, as.Error
+}
+func (as *APIStub) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {
+	as.appendCall(ctx, as.LinkList, options)
+	return arvados.LinkList{}, as.Error
+}
+func (as *APIStub) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {
+	as.appendCall(ctx, as.LinkDelete, options)
+	return arvados.Link{}, as.Error
+}
 func (as *APIStub) SpecimenCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Specimen, error) {
 	as.appendCall(ctx, as.SpecimenCreate, options)
 	return arvados.Specimen{}, as.Error
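
A short sketch of how the extended stub might be used wherever an arvados.API implementation is needed in a test; the error message and UUID are placeholders, and only the Error field visible in the methods above is relied on here.

package main

import (
	"context"
	"errors"
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
	"git.arvados.org/arvados.git/sdk/go/arvadostest"
)

func main() {
	// APIStub returns its Error field from every method, so code under test can be
	// pointed at it instead of a real controller backend.
	stub := &arvadostest.APIStub{Error: errors.New("stub error")}

	// With the additions above, the stub keeps satisfying the arvados.API interface.
	var backend arvados.API = stub

	_, err := backend.LinkGet(context.Background(), arvados.GetOptions{UUID: "xxxxx-o0j2j-xxxxxxxxxxxxxxx"})
	fmt.Println(err) // prints "stub error"
}
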

commit 7de380d5e7dbc3361c15d48d92619b222b77f6f8
Author: Tom Clegg <tom at curii.com>
Date:   Wed Nov 10 11:13:09 2021 -0500

    18346: Do not forward locally-issued token to own login cluster.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/lib/controller/federation/conn.go b/lib/controller/federation/conn.go
index aa05cb1e6..39e4f2676 100644
--- a/lib/controller/federation/conn.go
+++ b/lib/controller/federation/conn.go
@@ -37,7 +37,7 @@ func New(cluster *arvados.Cluster) *Conn {
 		if !remote.Proxy || id == cluster.ClusterID {
 			continue
 		}
-		conn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(local, id))
+		conn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(cluster, local, id))
 		// Older versions of controller rely on the Via header
 		// to detect loops.
 		conn.SendHeader = http.Header{"Via": {"HTTP/1.1 arvados-controller"}}
@@ -55,7 +55,7 @@ func New(cluster *arvados.Cluster) *Conn {
 // tokens from an incoming request context, determines whether they
 // should (and can) be salted for the given remoteID, and returns the
 // resulting tokens.
-func saltedTokenProvider(local backend, remoteID string) rpc.TokenProvider {
+func saltedTokenProvider(cluster *arvados.Cluster, local backend, remoteID string) rpc.TokenProvider {
 	return func(ctx context.Context) ([]string, error) {
 		var tokens []string
 		incoming, ok := auth.FromContext(ctx)
@@ -63,6 +63,16 @@ func saltedTokenProvider(local backend, remoteID string) rpc.TokenProvider {
 			return nil, errors.New("no token provided")
 		}
 		for _, token := range incoming.Tokens {
+			if strings.HasPrefix(token, "v2/"+cluster.ClusterID+"-") && remoteID == cluster.Login.LoginCluster {
+				// If we did this, the login cluster
+				// would call back to us and then
+				// reject our response because the
+				// user UUID prefix (i.e., the
+				// LoginCluster prefix) won't match
+				// the token UUID prefix (i.e., our
+				// prefix).
+				return nil, httpErrorf(http.StatusUnauthorized, "cannot use a locally issued token to forward a request to our login cluster (%s)", remoteID)
+			}
 			salted, err := auth.SaltToken(token, remoteID)
 			switch err {
 			case nil:
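
The guard above keys off the literal layout of a v2 token string; a stand-alone sketch of the same prefix test follows (the cluster IDs and token below are placeholders).

package main

import (
	"fmt"
	"strings"
)

// isLocallyIssued reports whether token is a v2 token minted by localID,
// mirroring the prefix check added above ("v2/<localID>-...").
func isLocallyIssued(token, localID string) bool {
	return strings.HasPrefix(token, "v2/"+localID+"-")
}

func main() {
	const localID = "z3333"      // this cluster
	const loginCluster = "z1111" // Login.LoginCluster in the config
	token := "v2/z3333-gj3su-xxxxxxxxxxxxxxx/secretpart"

	remoteID := loginCluster
	if isLocallyIssued(token, localID) && remoteID == loginCluster {
		fmt.Println("refusing to forward: a locally issued token cannot be salted for the login cluster")
	}
}
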
diff --git a/lib/controller/federation/federation_test.go b/lib/controller/federation/federation_test.go
index fdc4d96cf..984d32dc3 100644
--- a/lib/controller/federation/federation_test.go
+++ b/lib/controller/federation/federation_test.go
@@ -93,5 +93,5 @@ func (s *FederationSuite) addHTTPRemote(c *check.C, id string, backend backend)
 		Host:   srv.Addr,
 		Proxy:  true,
 	}
-	s.fed.remotes[id] = rpc.NewConn(id, &url.URL{Scheme: "http", Host: srv.Addr}, true, saltedTokenProvider(s.fed.local, id))
+	s.fed.remotes[id] = rpc.NewConn(id, &url.URL{Scheme: "http", Host: srv.Addr}, true, saltedTokenProvider(s.cluster, s.fed.local, id))
 }
diff --git a/lib/controller/integration_test.go b/lib/controller/integration_test.go
index 8a23bccfb..f2f88eb25 100644
--- a/lib/controller/integration_test.go
+++ b/lib/controller/integration_test.go
@@ -961,3 +961,63 @@ func (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {
 		}
 	}
 }
+
+// z3333 should not forward a locally-issued container runtime token,
+// associated with a z1111 user, to its login cluster z1111. z1111
+// would only call back to z3333 and then reject the response because
+// the user ID does not match the token prefix. See
+// dev.arvados.org/issues/18346
+func (s *IntegrationSuite) TestForwardRuntimeTokenToLoginCluster(c *check.C) {
+	db3, db3conn := s.dbConn(c, "z3333")
+	defer db3.Close()
+	defer db3conn.Close()
+	rootctx1, _, _ := s.testClusters["z1111"].RootClients()
+	rootctx3, _, _ := s.testClusters["z3333"].RootClients()
+	conn1 := s.testClusters["z1111"].Conn()
+	conn3 := s.testClusters["z3333"].Conn()
+	userctx1, _, _, _ := s.testClusters["z1111"].UserClients(rootctx1, c, conn1, "user@example.com", true)
+
+	user1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})
+	c.Assert(err, check.IsNil)
+	c.Logf("user1 %+v", user1)
+
+	imageColl, err := conn3.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+		"manifest_text": ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar\n",
+	}})
+	c.Assert(err, check.IsNil)
+	c.Logf("imageColl %+v", imageColl)
+
+	cr, err := conn3.ContainerRequestCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{
+		"state":           "Committed",
+		"command":         []string{"echo"},
+		"container_image": imageColl.PortableDataHash,
+		"cwd":             "/",
+		"output_path":     "/",
+		"priority":        1,
+		"runtime_constraints": arvados.RuntimeConstraints{
+			VCPUs: 1,
+			RAM:   1000000000,
+		},
+	}})
+	c.Assert(err, check.IsNil)
+	c.Logf("container request %+v", cr)
+	ctr, err := conn3.ContainerLock(rootctx3, arvados.GetOptions{UUID: cr.ContainerUUID})
+	c.Assert(err, check.IsNil)
+	c.Logf("container %+v", ctr)
+
+	// We could use conn3.ContainerAuth() here, but that API
+	// hasn't been added to sdk/go/arvados/api.go yet.
+	row := db3conn.QueryRowContext(context.Background(), `SELECT api_token from api_client_authorizations where uuid=$1`, ctr.AuthUUID)
+	c.Check(row, check.NotNil)
+	var val sql.NullString
+	row.Scan(&val)
+	c.Assert(val.Valid, check.Equals, true)
+	runtimeToken := "v2/" + ctr.AuthUUID + "/" + val.String
+	ctrctx, _, _ := s.testClusters["z3333"].ClientsWithToken(runtimeToken)
+	c.Logf("container runtime token %+v", runtimeToken)
+
+	_, err = conn3.UserGet(ctrctx, arvados.GetOptions{UUID: user1.UUID})
+	c.Assert(err, check.NotNil)
+	c.Check(err, check.ErrorMatches, `request failed: .* 401 Unauthorized: cannot use a locally issued token to forward a request to our login cluster \(z1111\)`)
+	c.Check(err, check.Not(check.ErrorMatches), `(?ms).*127\.0\.0\.11.*`)
+}
diff --git a/sdk/go/arvados/container.go b/sdk/go/arvados/container.go
index 384bebb59..7c68bdb20 100644
--- a/sdk/go/arvados/container.go
+++ b/sdk/go/arvados/container.go
@@ -36,6 +36,7 @@ type Container struct {
 	RuntimeUserUUID           string                 `json:"runtime_user_uuid"`
 	RuntimeAuthScopes         []string               `json:"runtime_auth_scopes"`
 	RuntimeToken              string                 `json:"runtime_token"`
+	AuthUUID                  string                 `json:"auth_uuid"`
 }
 
 // ContainerRequest is an arvados#container_request resource.
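
The new field is what lets a caller reassemble a container's runtime token, as the integration test above does; a minimal sketch (the UUID and secret are placeholders, and in real code the secret comes from the matching api_client_authorizations row).

package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// Placeholders: AuthUUID would come from a locked container record.
	ctr := arvados.Container{AuthUUID: "z3333-gj3su-xxxxxxxxxxxxxxx"}
	secret := "apitokensecret"

	runtimeToken := "v2/" + ctr.AuthUUID + "/" + secret
	fmt.Println(runtimeToken)
}
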

commit 4c53d93b1c9356aea2c509fcfc79cc48aa0e2fa1
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Nov 4 16:06:03 2021 -0400

    Remove last reference to terraform for now.
    
    refs #17450
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index a0a38c3b9..fdfd05ead 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -101,7 +101,7 @@ The <i>multi_host</i> include LetsEncrypt salt code to automatically request and
 
 h3(#further_customization). Further customization of the installation (modifying the salt pillars and states)
 
-You will need further customization to suit your environment, which can be done editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that can be retrieved as output of the terraform run.
+You will need further customization to suit your environment, which can be done by editing the Saltstack pillars and states files. Pay particular attention to the <i>pillars/arvados.sls</i> file, where you will need to provide some information that describes your environment.
 
 Any extra <i>state</i> file you add under <i>local_config_dir/states</i> will be added to the salt run and applied to the hosts.
 

commit 17bf8752a8b110a95cde3683bc0a6c586fae3ac9
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Nov 4 15:59:26 2021 -0400

    Remove reference to terraform code that was never checked in.
    
    refs #17450
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index 0d7fb916e..a0a38c3b9 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -11,7 +11,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 # "Introduction":#introduction
 # "Hosts preparation":#hosts_preparation
-## "Hosts setup using terraform (experimental)":#hosts_setup_using_terraform
 ## "Create a compute image":#create_a_compute_image
 # "Multi host install using the provision.sh script":#multi_host
 # "Choose the desired configuration":#choose_configuration
@@ -65,14 +64,6 @@ Note that these hosts can be virtual machines in your infrastructure and they do
 
 Again, if your infrastructure differs from the setup proposed above (ie, using RDS or an existing DB server), remember that you will need to edit the configuration files for the scripts so they work with your infrastructure.
 
-
-h3(#hosts_setup_using_terraform). Hosts setup using terraform (AWS, experimental)
-
-We added a few "terraform":https://terraform.io/ scripts (https://github.com/arvados/arvados/tree/main/tools/terraform) to let you create these instances easier in an AWS account. Check "the Arvados terraform documentation":/doc/install/terraform.html for more details.
-
-
-
-
 h2(#multi_host). Multi host install using the provision.sh script
 
 {% include 'branchname' %}

commit c0ba291cfb28192b1a3255008aefaf13583fea97
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Nov 4 15:55:17 2021 -0400

    Make the linkchecker more strict so it objects to relative links within
    our doc tree that do not have the {{ site.baseurl }} prefix.
    
    Fix all the links that were incorrect that way.
    
    No issue #
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/Rakefile b/doc/Rakefile
index 2b4b6af2e..4427f7822 100644
--- a/doc/Rakefile
+++ b/doc/Rakefile
@@ -160,7 +160,8 @@ task :linkchecker => [ :generate ] do
   Dir.chdir(".site") do
     `which linkchecker`
     if $? == 0
-      system "linkchecker index.html --ignore-url='!file://'" or exit $?.exitstatus
+      # we need --check-extern to check relative links, weird but true
+      system "linkchecker index.html --check-extern --ignore-url='!file://'" or exit $?.exitstatus
     else
       puts "Warning: linkchecker not found, skipping run".colorize(:light_red)
     end
diff --git a/doc/_includes/_tutorial_expectations.liquid b/doc/_includes/_tutorial_expectations.liquid
index 09b18f0d4..d4d05078f 100644
--- a/doc/_includes/_tutorial_expectations.liquid
+++ b/doc/_includes/_tutorial_expectations.liquid
@@ -5,5 +5,5 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin' %}
-This tutorial assumes that you have access to the "Arvados command line tools":/user/getting_started/setup-cli.html and have set the "API token":{{site.baseurl}}/user/reference/api-tokens.html and confirmed a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html .
+This tutorial assumes that you have access to the "Arvados command line tools":{{ site.baseurl }}/user/getting_started/setup-cli.html and have set the "API token":{{site.baseurl}}/user/reference/api-tokens.html and confirmed a "working environment.":{{site.baseurl}}/user/getting_started/check-environment.html .
 {% include 'notebox_end' %}
diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index 304eedb1d..5a5154ce5 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -396,7 +396,7 @@ h3. Python packaging change
 
 As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. These packages now include a built-in virtualenv to reduce dependencies on system packages. We have also stopped packaging and publishing backports for all the Python dependencies of our packages, as they are no longer needed.
 
-One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires the loading of the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. arv-get, etc.).
+One practical consequence of this change is that the use of the Arvados Python SDK (aka "import arvados") will require a tweak if the SDK was installed from a distribution package. It now requires the loading of the virtualenv environment from our packages. The "Install documentation for the Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. arv-get, etc.).
 
 Python scripts that rely on the distribution Arvados Python SDK packages to import the Arvados SDK will need to be tweaked to load the correct Python environment.
 
diff --git a/doc/admin/user-activity.html.textile.liquid b/doc/admin/user-activity.html.textile.liquid
index 21bfb7655..01715ff6e 100644
--- a/doc/admin/user-activity.html.textile.liquid
+++ b/doc/admin/user-activity.html.textile.liquid
@@ -17,7 +17,7 @@ h2. Option 1: Install from a distribution package
 
 This installation method is recommended to make the CLI tools available system-wide. It can coexist with the installation method described in option 2, below.
 
-First, configure the "Arvados package repositories":../../install/packages.html
+First, configure the "Arvados package repositories":{{ site.baseurl }}/install/packages.html
 
 {% assign arvados_component = 'python3-arvados-user-activity' %}
 
@@ -31,7 +31,7 @@ Step 2: Change directory to @arvados/tools/user-activity@
 
 Step 3: Run @pip install .@ in an appropriate installation environment, such as a @virtualenv@.
 
-Note: depends on the "Arvados Python SDK":../sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).
+Note: depends on the "Arvados Python SDK":{{ site.baseurl }}/sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).
 
 h2. Usage
 
diff --git a/doc/api/permission-model.html.textile.liquid b/doc/api/permission-model.html.textile.liquid
index 82e8128c6..a44d2eefa 100644
--- a/doc/api/permission-model.html.textile.liquid
+++ b/doc/api/permission-model.html.textile.liquid
 This grants the permission in @name@ for @tail_uuid@ accessing @head_uuid@.
 
 If a User has *can_manage* permission on some object, the user has the ability to read, create, update and delete permission links with @head_uuid@ of the managed object.  In other words, the user has the ability to modify the permission grants on the object.
 
-The *can_login* @name@ is only meaningful on a permission link with with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the user UUID permission to log into the Virtual Machine UUID. The username for the VM is specified in the @properties@ field. Group membership can be specified that way as well, optionally. See the "VM login section on the CLI cheat sheet":/install/cheat_sheet.html#vm-login for an example.
+The *can_login* @name@ is only meaningful on a permission link with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the user UUID permission to log into the Virtual Machine UUID. The username for the VM is specified in the @properties@ field. Group membership can be specified that way as well, optionally. See the "VM login section on the 'User management at the CLI' page":{{ site.baseurl }}/admin/user-management-cli.html#vm-login for an example.
 
 h3. Transitive permissions
 
@@ -66,7 +66,7 @@ A "project" is a subtype of Group that is displayed as a "Project" in Workbench,
 * The name of a project is unique only among projects and filters with the same owner_uuid.
 * Projects can be targets (@head_uuid@) of permission links, but not origins (@tail_uuid@).  Putting a project in a @tail_uuid@ field is an error.
 
-A "filter" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount at . See "the groups API documentation":/api/methods/groups.html for more information.
+A "filter" is a subtype of Group that is displayed as a "Project" in Workbench, and as a directory by @arv-mount at . See "the groups API documentation":{{ site.baseurl }}/api/methods/groups.html for more information.
 * A filter group cannot own things (cannot appear in @owner_uuid@).  Putting a filter group in an @owner_uuid@ field is an error.
 * A filter group can be owned by a user or a project.
 * The name of a filter is unique only among projects and filters with the same owner_uuid.
diff --git a/doc/api/projects.html.textile.liquid b/doc/api/projects.html.textile.liquid
index b1c74fe0d..9aa3d85d4 100644
--- a/doc/api/projects.html.textile.liquid
+++ b/doc/api/projects.html.textile.liquid
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Arvados @projects@ are used to organize objects. Projects can contain @collections@, @container requests@, @workflows@, etc. Projects can also contain other projects. An object is part of a project if the @owner_uuid@ of the object is set to the uuid of the project.
 
-Projects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value "project". More information is available in the "groups API reference":/api/methods/groups.html.
+Projects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value "project". More information is available in the "groups API reference":{{ site.baseurl }}/api/methods/groups.html.
 
 Projects can be manipulated via Workbench, the cli tools, the SDKs, and the Arvados APIs.
 
diff --git a/doc/architecture/keep-components-overview.html.textile.liquid b/doc/architecture/keep-components-overview.html.textile.liquid
index b07716aac..4b1ca9b84 100644
--- a/doc/architecture/keep-components-overview.html.textile.liquid
+++ b/doc/architecture/keep-components-overview.html.textile.liquid
@@ -14,13 +14,13 @@ Keep has a number of components. This page describes each component and the role
 h3. Keep clients for data access
 
 In order to access data in Keep, a client is needed to store data in and retrieve data from Keep. Different types of Keep clients exist:
-* a command line client like "@arv-get@":/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":/user/tutorials/tutorial-keep.html#upload-using-command
-* a FUSE mount provided by "@arv-mount@":/user/tutorials/tutorial-keep-mount-gnu-linux.html
+* a command line client like "@arv-get@":{{ site.baseurl }}/user/tutorials/tutorial-keep-get.html#download-using-arv or "@arv-put@":{{ site.baseurl }}/user/tutorials/tutorial-keep.html#upload-using-command
+* a FUSE mount provided by "@arv-mount@":{{ site.baseurl }}/user/tutorials/tutorial-keep-mount-gnu-linux.html
 * a WebDAV mount provided by @keep-web@
 * an S3-compatible endpoint provided by @keep-web@
-* programmatic access via the "Arvados SDKs":/sdk/index.html
+* programmatic access via the "Arvados SDKs":{{ site.baseurl }}/sdk/index.html
 
-In essense, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":/architecture/keep-clients.html.
+In essence, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in "the next section":{{ site.baseurl }}/architecture/keep-clients.html.
 
 For example, when a request comes in to read a file from Keep, the client will
 * request the collection object (including its manifest) from the API server
@@ -32,7 +32,7 @@ All of those steps are subject to access control, which applies at the level of
 
 h3. API server
 
-The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.
+The API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when "storage classes":{{ site.baseurl }}/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.
 
 h3. Keepstore
 
diff --git a/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid b/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid
index f7b7a1641..5a5d59bc8 100644
--- a/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid
+++ b/doc/install/arvados-on-kubernetes-GKE.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This page documents setting up and running the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
+This page documents setting up and running the "Arvados on Kubernetes":{{ site.baseurl }}/install/arvados-on-kubernetes.html @Helm@ chart on @Google Kubernetes Engine@ (GKE).
 
 h2. Prerequisites
 
diff --git a/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid b/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid
index 9ecb2c895..6b292caf3 100644
--- a/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid
+++ b/doc/install/arvados-on-kubernetes-minikube.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This page documents setting up and running the "Arvados on Kubernetes":/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
+This page documents setting up and running the "Arvados on Kubernetes":{{ site.baseurl }}/install/arvados-on-kubernetes.html @Helm@ chart on @Minikube@.
 
 h2. Prerequisites
 
diff --git a/doc/install/arvados-on-kubernetes.html.textile.liquid b/doc/install/arvados-on-kubernetes.html.textile.liquid
index 9169b7810..5ef757d10 100644
--- a/doc/install/arvados-on-kubernetes.html.textile.liquid
+++ b/doc/install/arvados-on-kubernetes.html.textile.liquid
@@ -28,5 +28,5 @@ h2. Requirements
 * Minikube or Google Kubernetes Engine (Kubernetes 1.10+ with at least 3 nodes, 2+ cores per node)
 * @kubectl@ and @Helm 3@ installed locally, and able to connect to your Kubernetes cluster
 
-Please refer to "Arvados on Minikube":/install/arvados-on-kubernetes-minikube.html or "Arvados on GKE":/install/arvados-on-kubernetes-GKE.html for detailed installation instructions.
+Please refer to "Arvados on Minikube":{{ site.baseurl }}/install/arvados-on-kubernetes-minikube.html or "Arvados on GKE":{{ site.baseurl }}/install/arvados-on-kubernetes-GKE.html for detailed installation instructions.
 
diff --git a/doc/install/crunch2/install-compute-node-docker.html.textile.liquid b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
index 876bb6ae5..66bd85b7c 100644
--- a/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
 {% include 'notebox_end' %}
 
 {% include 'notebox_begin_warning' %}
diff --git a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
index 09a3b4e3a..8e9db0c4e 100644
--- a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.
 {% include 'notebox_end' %}
 
 {% include 'notebox_begin_warning' %}
diff --git a/doc/user/topics/arvados-sync-groups.html.textile.liquid b/doc/user/topics/arvados-sync-groups.html.textile.liquid
index 26be56782..1f7eede4b 100644
--- a/doc/user/topics/arvados-sync-groups.html.textile.liquid
+++ b/doc/user/topics/arvados-sync-groups.html.textile.liquid
@@ -19,7 +19,7 @@ Every line on the file should have 3 values: a group name, a local user identifi
 
 Users can be identified by their email address or username: the tool will check if every user exist on the system, and report back when not found. Groups on the other hand, are identified by their name.
 
-Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":/admin/group-management.html.
+Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions on the "group management admin guide":{{ site.baseurl }}/admin/group-management.html.
 
 This tool is designed to be run periodically reading a file created by a remote auth system (ie: LDAP) dump script, applying what's included on the file as the source of truth.
 

commit 7d39fd29dbf5b6b9bad90cee69dce24498f3e5ed
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Fri Oct 29 17:01:19 2021 -0400

    Set arvbox DEFAULT_TAG=2.3.0
    
    no issue #
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/tools/arvbox/bin/arvbox b/tools/arvbox/bin/arvbox
index 36a33376a..7ae11cf5d 100755
--- a/tools/arvbox/bin/arvbox
+++ b/tools/arvbox/bin/arvbox
@@ -61,7 +61,7 @@ if test -z "$WORKBENCH2_BRANCH" ; then
 fi
 
 # Update this to the docker tag for the version on releases.
-DEFAULT_TAG=
+DEFAULT_TAG=2.3.0
 
 PG_DATA="$ARVBOX_DATA/postgres"
 VAR_DATA="$ARVBOX_DATA/var"

commit d588e6bca2e886dc978d3cd8ca17002d41fbe585
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Fri Oct 29 16:59:55 2021 -0400

    Fix arvbox versioning.  no issue #
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/install/arvbox.html.textile.liquid b/doc/install/arvbox.html.textile.liquid
index a8235ee70..8e787d5a2 100644
--- a/doc/install/arvbox.html.textile.liquid
+++ b/doc/install/arvbox.html.textile.liquid
@@ -14,9 +14,9 @@ Arvbox is a Docker-based self-contained development, demonstration and testing e
 h2. Quick start
 
 <pre>
-$ curl -O https://git.arvados.org/arvados.git/blob_plain/refs/heads/main:/tools/arvbox/bin/arvbox
+$ curl -O https://git.arvados.org/arvados.git/blob_plain/refs/heads/2.3-dev:/tools/arvbox/bin/arvbox
 $ chmod +x arvbox
-$ ./arvbox start localdemo latest
+$ ./arvbox start localdemo 2.3.0
 $ ./arvbox adduser demouser demo@example.com
 </pre>
 

commit 6c8a0923515a0c9e085fa852de3a48f849a742fe
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Thu Sep 9 10:16:49 2021 -0400

    Tweak docker image tagging
    
    no issue #
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/build/run-build-docker-images.sh b/build/run-build-docker-images.sh
index 8cff14b71..00ef2de41 100755
--- a/build/run-build-docker-images.sh
+++ b/build/run-build-docker-images.sh
@@ -85,11 +85,14 @@ docker_push () {
     # docker always creates a local 'latest' tag, and we don't want to push that
     # tag in every case. Remove it.
     docker rmi $1:latest
+
+    GITHEAD=$(cd $WORKSPACE && git log --format=%H -n1 HEAD)
+
     if [[ ! -z "$tags" ]]
     then
         for tag in $( echo $tags|tr "," " " )
         do
-             $DOCKER tag $1 $1:$tag
+             $DOCKER tag $1:$GITHEAD $1:$tag
         done
     fi
 

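For reference, the tagging flow after this change looks roughly like the sketch below: the image is addressed by its git commit hash and each release tag is applied to that specific build. The image name and tag values are illustrative, not taken from this commit.

    docker rmi arvados/jobs:latest                # drop the implicit 'latest' tag
    GITHEAD=$(git log --format=%H -n1 HEAD)       # run from the source checkout that produced the image
    for tag in 2.3.0 2.3.0-1; do                  # illustrative list of release tags
        docker tag arvados/jobs:$GITHEAD arvados/jobs:$tag
    done
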
commit fb39000148809df935ada41cd3be373fde268c57
Author: Peter Amstutz <peter.amstutz at curii.com>
Date:   Wed Oct 27 14:37:47 2021 -0400

    Fix top of upgrade notes to 2.3.0
    
    refs #18130
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index 399ec35d2..304eedb1d 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -35,9 +35,9 @@ TODO: extract this information based on git commit messages and generate changel
 <div class="releasenotes">
 </notextile>
 
-h2(#main). development main (as of 2021-09-07)
+h2(#v2_3_0). v2.3.0 (2021-10-27)
 
-"Upgrading from 2.2.0":#v2_2_0
+"previous: Upgrading from 2.2.0":#v2_2_0
 
 h3. Ubuntu 18.04 packages for arvados-api-server and arvados-workbench now conflict with ruby-bundler
 
@@ -69,7 +69,7 @@ Typically a docker image collection contains a single @.tar@ file at the top lev
 
 h2(#v2_2_0). v2.2.0 (2021-06-03)
 
-"Upgrading from 2.1.0":#v2_1_0
+"previous: Upgrading from 2.1.0":#v2_1_0
 
 h3. New spelling of S3 credential configs
 

commit f0449aa6f4d490de26e5240202a8e77113d4c471
Author: Ward Vandewege <ward at curii.com>
Date:   Tue Oct 26 14:28:23 2021 -0400

    Since CWL defaults to 256M of ram, make that the suggested amount for
    the `mksquashfs mem` setting in singularity.conf.
    
    No issue #
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/_includes/_singularity_mksquashfs_configuration.liquid b/doc/_includes/_singularity_mksquashfs_configuration.liquid
index dc0c394ba..e31e801c1 100644
--- a/doc/_includes/_singularity_mksquashfs_configuration.liquid
+++ b/doc/_includes/_singularity_mksquashfs_configuration.liquid
@@ -12,4 +12,4 @@ This section is only relevant when using Singularity. Skip this section when usi
 {% include 'notebox_end' %}
 {% endif %}
 
-Docker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 512M@. The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory.
+Docker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 256M@. The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory. The default memory allocation in CWL is 256M, so that is also a good choice for the @mksquashfs mem@ setting.
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index 7c922e28d..a56519fb1 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -59,7 +59,7 @@ foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=
 
 {% include 'singularity_mksquashfs_configuration' %}
 
-The desired amount of memory to make available for @mksquashfs@ can be configured in an argument to the build script, see the next section. It defaults to @512M@.
+The desired amount of memory to make available for @mksquashfs@ can be configured in an argument to the build script, see the next section. It defaults to @256M@.
 
 h2(#building). The build script
 
@@ -111,7 +111,7 @@ Options:
       Set this to "-dev" to track the unstable/dev Arvados repositories
   --public-key-file (required)
       Path to the public key file that a-d-c will use to log into the compute node
-  --mksquashfs-mem (default: 512M)
+  --mksquashfs-mem (default: 256M)
       Only relevant when using Singularity. This is the amount of memory mksquashfs is allowed to use.
   --debug
       Output debug information (default: false)
diff --git a/tools/compute-images/build.sh b/tools/compute-images/build.sh
index a714bafc1..526db4906 100755
--- a/tools/compute-images/build.sh
+++ b/tools/compute-images/build.sh
@@ -55,7 +55,7 @@ Options:
       Set this to "-dev" to track the unstable/dev Arvados repositories
   --public-key-file (required)
       Path to the public key file that a-d-c will use to log into the compute node
-  --mksquashfs-mem (default: 512M)
+  --mksquashfs-mem (default: 256M)
       Only relevant when using Singularity. This is the amount of memory mksquashfs is allowed to use.
   --debug
       Output debug information (default: false)
@@ -80,7 +80,7 @@ DEBUG=
 SSH_USER=
 AWS_DEFAULT_REGION=us-east-1
 PUBLIC_KEY_FILE=
-MKSQUASHFS_MEM=512M
+MKSQUASHFS_MEM=256M
 
 PARSEDOPTS=$(getopt --name "$0" --longoptions \
     help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,resolver:,reposuffix:,public-key-file:,mksquashfs-mem:,debug \

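On an existing compute node the same limit can be applied by hand; a minimal sketch, assuming the stock singularity.conf location referenced above (edit any existing "mksquashfs mem" line instead of appending a duplicate):

    # Cap mksquashfs RAM usage; 256M matches the CWL default RAM request.
    echo 'mksquashfs mem = 256M' | sudo tee -a /etc/singularity/singularity.conf
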
commit 067a68b5e9dfa1c7d5e68fd64553e0ced89cad36
Author: Ward Vandewege <ward at jhvc.com>
Date:   Mon Oct 25 10:15:21 2021 -0400

    18290: address review comments.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 8b51a85d9..52e35d83f 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -1026,7 +1026,7 @@ Clusters:
         # Template variables starting with % will be substituted as follows:
         #
         # %U uuid
-        # %C number of cpus
+        # %C number of VCPUs
         # %M memory in MB
         # %T tmp in MB
         #
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index 519d1a8e5..c58bbe178 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -1032,7 +1032,7 @@ Clusters:
         # Template variables starting with % will be substituted as follows:
         #
         # %U uuid
-        # %C number of cpus
+        # %C number of VCPUs
         # %M memory in MB
         # %T tmp in MB
         #
diff --git a/lib/lsf/dispatch.go b/lib/lsf/dispatch.go
index d17c458e8..6e35b7de9 100644
--- a/lib/lsf/dispatch.go
+++ b/lib/lsf/dispatch.go
@@ -270,9 +270,7 @@ func (disp *dispatcher) bkill(ctr arvados.Container) {
 }
 
 func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error) {
-	tmpArgs := []string{}
 	args := []string{"bsub"}
-	tmpArgs = append(tmpArgs, disp.Cluster.Containers.LSF.BsubArgumentsList...)
 
 	tmp := int64(math.Ceil(float64(dispatchcloud.EstimateScratchSpace(&container)) / 1048576))
 	vcpus := container.RuntimeConstraints.VCPUs
@@ -280,16 +278,27 @@ func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error)
 		container.RuntimeConstraints.KeepCacheRAM+
 		int64(disp.Cluster.Containers.ReserveExtraRAM)) / 1048576))
 
-	r := regexp.MustCompile(`([^%]|^)%([^%])`)
-	undoubleRE := regexp.MustCompile(`%%`)
-	for _, a := range tmpArgs {
-		tmp := r.ReplaceAllStringFunc(a, func(m string) string {
-			parts := r.FindStringSubmatch(m)
-			return parts[1] + disp.substitute(parts[2], container.UUID, vcpus, mem, tmp)
-		})
-		// handle escaped literal % symbols
-		tmp = undoubleRE.ReplaceAllString(tmp, "%")
-		args = append(args, tmp)
+	repl := map[string]string{
+		"%%": "%",
+		"%C": fmt.Sprintf("%d", vcpus),
+		"%M": fmt.Sprintf("%d", mem),
+		"%T": fmt.Sprintf("%d", tmp),
+		"%U": container.UUID,
+	}
+
+	re := regexp.MustCompile(`%.`)
+	var substitutionErrors string
+	for _, a := range disp.Cluster.Containers.LSF.BsubArgumentsList {
+		args = append(args, re.ReplaceAllStringFunc(a, func(s string) string {
+			subst := repl[s]
+			if len(subst) == 0 {
+				substitutionErrors += fmt.Sprintf("Unknown substitution parameter %s in BsubArgumentsList, ", s)
+			}
+			return subst
+		}))
+	}
+	if len(substitutionErrors) != 0 {
+		return nil, fmt.Errorf("%s", substitutionErrors[:len(substitutionErrors)-2])
 	}
 
 	if u := disp.Cluster.Containers.LSF.BsubSudoUser; u != "" {
@@ -298,23 +307,6 @@ func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error)
 	return args, nil
 }
 
-func (disp *dispatcher) substitute(l string, uuid string, vcpus int, mem, tmp int64) string {
-	var arg string
-	switch l {
-	case "C":
-		arg = fmt.Sprintf("%d", vcpus)
-	case "T":
-		arg = fmt.Sprintf("%d", tmp)
-	case "M":
-		arg = fmt.Sprintf("%d", mem)
-	case "U":
-		arg = uuid
-	default:
-		arg = "%" + l
-	}
-	return arg
-}
-
 // Check the next bjobs report, and invoke TrackContainer for all the
 // containers in the report. This gives us a chance to cancel existing
 // Arvados LSF jobs (started by a previous dispatch process) that

commit d415db42e227d2f309d942486b7d2fcb431da628
Author: Ward Vandewege <ward at curii.com>
Date:   Sat Oct 23 11:42:14 2021 -0400

    18290: LSF: make the bsub arguments completely configurable.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 4e2a0e26d..8b51a85d9 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -1021,14 +1021,23 @@ Clusters:
           AssignNodeHostname: "compute%<slot_number>d"
 
       LSF:
-        # Additional arguments to bsub when submitting Arvados
-        # containers as LSF jobs.
+        # Arguments to bsub when submitting Arvados containers as LSF jobs.
+        #
+        # Template variables starting with % will be substituted as follows:
+        #
+        # %U uuid
+        # %C number of cpus
+        # %M memory in MB
+        # %T tmp in MB
+        #
+        # Use %% to express a literal %. The %%J in the default will be changed
+        # to %J, which is interpreted by bsub itself.
         #
         # Note that the default arguments cause LSF to write two files
         # in /tmp on the compute node each time an Arvados container
         # runs. Ensure you have something in place to delete old files
-        # from /tmp, or adjust these arguments accordingly.
-        BsubArgumentsList: ["-o", "/tmp/crunch-run.%J.out", "-e", "/tmp/crunch-run.%J.err"]
+        # from /tmp, or adjust the "-o" and "-e" arguments accordingly.
+        BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]"]
 
         # Use sudo to switch to this user account when submitting LSF
         # jobs.
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index 875939a3e..519d1a8e5 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -1027,14 +1027,23 @@ Clusters:
           AssignNodeHostname: "compute%<slot_number>d"
 
       LSF:
-        # Additional arguments to bsub when submitting Arvados
-        # containers as LSF jobs.
+        # Arguments to bsub when submitting Arvados containers as LSF jobs.
+        #
+        # Template variables starting with % will be substituted as follows:
+        #
+        # %U uuid
+        # %C number of cpus
+        # %M memory in MB
+        # %T tmp in MB
+        #
+        # Use %% to express a literal %. The %%J in the default will be changed
+        # to %J, which is interpreted by bsub itself.
         #
         # Note that the default arguments cause LSF to write two files
         # in /tmp on the compute node each time an Arvados container
         # runs. Ensure you have something in place to delete old files
-        # from /tmp, or adjust these arguments accordingly.
-        BsubArgumentsList: ["-o", "/tmp/crunch-run.%J.out", "-e", "/tmp/crunch-run.%J.err"]
+        # from /tmp, or adjust the "-o" and "-e" arguments accordingly.
+        BsubArgumentsList: ["-o", "/tmp/crunch-run.%%J.out", "-e", "/tmp/crunch-run.%%J.err", "-J", "%U", "-n", "%C", "-D", "%MMB", "-R", "rusage[mem=%MMB:tmp=%TMB] span[hosts=1]"]
 
         # Use sudo to switch to this user account when submitting LSF
         # jobs.
diff --git a/lib/lsf/dispatch.go b/lib/lsf/dispatch.go
index d3ba605ab..d17c458e8 100644
--- a/lib/lsf/dispatch.go
+++ b/lib/lsf/dispatch.go
@@ -270,28 +270,49 @@ func (disp *dispatcher) bkill(ctr arvados.Container) {
 }
 
 func (disp *dispatcher) bsubArgs(container arvados.Container) ([]string, error) {
+	tmpArgs := []string{}
 	args := []string{"bsub"}
-	args = append(args, disp.Cluster.Containers.LSF.BsubArgumentsList...)
-	args = append(args, "-J", container.UUID)
-	args = append(args, disp.bsubConstraintArgs(container)...)
-	if u := disp.Cluster.Containers.LSF.BsubSudoUser; u != "" {
-		args = append([]string{"sudo", "-E", "-u", u}, args...)
-	}
-	return args, nil
-}
+	tmpArgs = append(tmpArgs, disp.Cluster.Containers.LSF.BsubArgumentsList...)
 
-func (disp *dispatcher) bsubConstraintArgs(container arvados.Container) []string {
-	// TODO: propagate container.SchedulingParameters.Partitions
 	tmp := int64(math.Ceil(float64(dispatchcloud.EstimateScratchSpace(&container)) / 1048576))
 	vcpus := container.RuntimeConstraints.VCPUs
 	mem := int64(math.Ceil(float64(container.RuntimeConstraints.RAM+
 		container.RuntimeConstraints.KeepCacheRAM+
 		int64(disp.Cluster.Containers.ReserveExtraRAM)) / 1048576))
-	return []string{
-		"-n", fmt.Sprintf("%d", vcpus),
-		"-D", fmt.Sprintf("%dMB", mem), // ulimit -d (note this doesn't limit the total container memory usage)
-		"-R", fmt.Sprintf("rusage[mem=%dMB:tmp=%dMB] span[hosts=1]", mem, tmp),
+
+	r := regexp.MustCompile(`([^%]|^)%([^%])`)
+	undoubleRE := regexp.MustCompile(`%%`)
+	for _, a := range tmpArgs {
+		tmp := r.ReplaceAllStringFunc(a, func(m string) string {
+			parts := r.FindStringSubmatch(m)
+			return parts[1] + disp.substitute(parts[2], container.UUID, vcpus, mem, tmp)
+		})
+		// handle escaped literal % symbols
+		tmp = undoubleRE.ReplaceAllString(tmp, "%")
+		args = append(args, tmp)
+	}
+
+	if u := disp.Cluster.Containers.LSF.BsubSudoUser; u != "" {
+		args = append([]string{"sudo", "-E", "-u", u}, args...)
+	}
+	return args, nil
+}
+
+func (disp *dispatcher) substitute(l string, uuid string, vcpus int, mem, tmp int64) string {
+	var arg string
+	switch l {
+	case "C":
+		arg = fmt.Sprintf("%d", vcpus)
+	case "T":
+		arg = fmt.Sprintf("%d", tmp)
+	case "M":
+		arg = fmt.Sprintf("%d", mem)
+	case "U":
+		arg = uuid
+	default:
+		arg = "%" + l
 	}
+	return arg
 }
 
 // Check the next bjobs report, and invoke TrackContainer for all the
diff --git a/lib/lsf/dispatch_test.go b/lib/lsf/dispatch_test.go
index 44a1a3d8c..641453e54 100644
--- a/lib/lsf/dispatch_test.go
+++ b/lib/lsf/dispatch_test.go
@@ -72,11 +72,10 @@ func (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...
 		switch prog {
 		case "bsub":
 			defaultArgs := s.disp.Cluster.Containers.LSF.BsubArgumentsList
-			c.Assert(len(args) > len(defaultArgs), check.Equals, true)
-			c.Check(args[:len(defaultArgs)], check.DeepEquals, defaultArgs)
-			args = args[len(defaultArgs):]
-
-			c.Check(args[0], check.Equals, "-J")
+			c.Assert(len(args), check.Equals, len(defaultArgs))
+			// %%J must have been rewritten to %J
+			c.Check(args[1], check.Equals, "/tmp/crunch-run.%J.out")
+			args = args[4:]
 			switch args[1] {
 			case arvadostest.LockedContainerUUID:
 				c.Check(args, check.DeepEquals, []string{

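With the default BsubArgumentsList above, the dispatcher expands the template variables per container before invoking bsub. A sketch of the resulting command line for a hypothetical container with 2 VCPUs, 4096 MB of RAM (including Keep cache and reserve) and 10240 MB of scratch, assuming BsubSudoUser is set to `crunch`; the container UUID is a placeholder:

    sudo -E -u crunch bsub \
        -o /tmp/crunch-run.%J.out -e /tmp/crunch-run.%J.err \
        -J zzzzz-dz642-xxxxxxxxxxxxxxx \
        -n 2 -D 4096MB \
        -R "rusage[mem=4096MB:tmp=10240MB] span[hosts=1]"

Note that the escaped `%%J` in the configuration reaches bsub as `%J`, which bsub itself expands to the LSF job id, while `%U`, `%C`, `%M` and `%T` are substituted by the dispatcher.
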
commit 308c90af198f5dd6b25ac284fe24aa8e648bc6d8
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 21 14:19:42 2021 -0400

    18281: Rephrase the help text for --force-rotate option.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/login-sync/bin/arvados-login-sync b/services/login-sync/bin/arvados-login-sync
index 92b9f9a2c..da8a21efa 100755
--- a/services/login-sync/bin/arvados-login-sync
+++ b/services/login-sync/bin/arvados-login-sync
@@ -21,7 +21,7 @@ end
 options = {}
 OptionParser.new do |parser|
   parser.on('--exclusive', 'Manage SSH keys file exclusively.')
-  parser.on('--rotate-tokens', 'Always create new user tokens. Usually needed with --token-lifetime.')
+  parser.on('--rotate-tokens', 'Force a rotation of all user tokens.')
   parser.on('--skip-missing-users', "Don't try to create any local accounts.")
   parser.on('--token-lifetime SECONDS', 'Create user tokens that expire after SECONDS.', Integer)
   parser.on('--debug', 'Enable debug output')

commit 0b8994f341459e4e6f3ed7cfb9e38109529d632e
Author: Ward Vandewege <ward at curii.com>
Date:   Wed Oct 20 16:28:30 2021 -0400

    18281: make arvados-login-sync smart enough to replace expired tokens.
           Also add a --debug parameter and some debug output.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/login-sync/bin/arvados-login-sync b/services/login-sync/bin/arvados-login-sync
index 8e5c6deb5..92b9f9a2c 100755
--- a/services/login-sync/bin/arvados-login-sync
+++ b/services/login-sync/bin/arvados-login-sync
@@ -24,6 +24,7 @@ OptionParser.new do |parser|
   parser.on('--rotate-tokens', 'Always create new user tokens. Usually needed with --token-lifetime.')
   parser.on('--skip-missing-users', "Don't try to create any local accounts.")
   parser.on('--token-lifetime SECONDS', 'Create user tokens that expire after SECONDS.', Integer)
+  parser.on('--debug', 'Enable debug output')
 end.parse!(into: options)
 
 exclusive_banner = "#######################################################################################
@@ -35,6 +36,10 @@ end_banner = "### END Arvados-managed keys -- changes between markers will be ov
 keys = ''
 
 begin
+  debug = false
+  if options[:"debug"]
+    debug = true
+  end
   arv = Arvados.new({ :suppress_ssl_warnings => false })
   logincluster_arv = Arvados.new({ :api_host => (ENV['LOGINCLUSTER_ARVADOS_API_HOST'] || ENV['ARVADOS_API_HOST']),
                                    :api_token => (ENV['LOGINCLUSTER_ARVADOS_API_TOKEN'] || ENV['ARVADOS_API_TOKEN']),
@@ -75,7 +80,7 @@ begin
         end
       else
         if pwnam[l[:username]].uid < uid_min
-          STDERR.puts "Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping"
+          STDERR.puts "Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping" if debug
           true
         end
       end
@@ -85,6 +90,7 @@ begin
 
   # Collect all keys
   logins.each do |l|
+    STDERR.puts("Considering #{l[:username]} ...") if debug
     keys[l[:username]] = Array.new() if not keys.has_key?(l[:username])
     key = l[:public_key]
     if !key.nil?
@@ -197,7 +203,32 @@ begin
     tokenfile = File.join(configarvados, "settings.conf")
 
     begin
-      if !File.exist?(tokenfile) || options[:"rotate-tokens"]
+      STDERR.puts "Processing #{tokenfile} ..." if debug
+      newToken = false
+      if File.exist?(tokenfile)
+        # check if the token is still valid
+        myToken = ENV["ARVADOS_API_TOKEN"]
+        userEnv = IO::read(tokenfile)
+        if (m = /^ARVADOS_API_TOKEN=(.*?\n)/m.match(userEnv))
+          begin
+            tmp_arv = Arvados.new({ :api_host => (ENV['LOGINCLUSTER_ARVADOS_API_HOST'] || ENV['ARVADOS_API_HOST']),
+                                   :api_token => (m[1]),
+                      :suppress_ssl_warnings => false })
+            tmp_arv.user.current
+          rescue Arvados::TransactionFailedError => e
+            if e.to_s =~ /401 Unauthorized/
+              STDERR.puts "Account #{l[:username]} token not valid, creating new token."
+              newToken = true
+            else
+              raise
+            end
+          end
+        end
+      elsif !File.exist?(tokenfile) || options[:"rotate-tokens"]
+        STDERR.puts "Account #{l[:username]} token file not found, creating new token."
+        newToken = true
+      end
+      if newToken
         aca_params = {owner_uuid: l[:user_uuid], api_client_id: 0}
         if options[:"token-lifetime"] && options[:"token-lifetime"] > 0
           aca_params.merge!(expires_at: (Time.now + options[:"token-lifetime"]))

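The validity probe amounts to calling the users#current endpoint with the stored token and treating a 401 as expired. A rough standalone equivalent of that check (a sketch only; host and token come from the usual environment variables):

    # Probe whether the stored token is still accepted by the API server.
    if curl -fsS -H "Authorization: Bearer $ARVADOS_API_TOKEN" \
            "https://$ARVADOS_API_HOST/arvados/v1/users/current" >/dev/null; then
        echo "token still valid"
    else
        echo "token rejected; arvados-login-sync would create a new one"
    fi
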
commit 2e921a511f4c5fb93f5bd1299b7a66b830440a8e
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 21 16:27:59 2021 -0400

    18288: when storing the anonymous user token, make sure to clear the
           expires_at field.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/api/script/get_anonymous_user_token.rb b/services/api/script/get_anonymous_user_token.rb
index 8775ae595..4c3ca34f0 100755
--- a/services/api/script/get_anonymous_user_token.rb
+++ b/services/api/script/get_anonymous_user_token.rb
@@ -58,6 +58,9 @@ def create_api_client_auth(supplied_token=nil)
 
   api_client_auth = ApiClientAuthorization.where(attr).first
   if !api_client_auth
+    # The anonymous user token should never expire but we are not allowed to
+    # set :expires_at to nil, so we set it to 1000 years in the future.
+    attr[:expires_at] = Time.now + 1000.years
     api_client_auth = ApiClientAuthorization.create!(attr)
   end
   api_client_auth

commit 1b95927d6b17cfa2a4c8a8f20bee7dafa59e3d34
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 21 16:16:04 2021 -0400

    18288: when running the db:check_long_lived_tokens and
           db:fix_long_lived_tokens rake tasks, do not expire the
           anonymouspublic token.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/services/api/lib/tasks/manage_long_lived_tokens.rake b/services/api/lib/tasks/manage_long_lived_tokens.rake
index d83c2b603..7a665ff7e 100644
--- a/services/api/lib/tasks/manage_long_lived_tokens.rake
+++ b/services/api/lib/tasks/manage_long_lived_tokens.rake
@@ -29,7 +29,7 @@ namespace :db do
         # skip this token
         next
       end
-      if (auth.user.uuid =~ /-tpzed-000000000000000/).nil?
+      if (auth.user.uuid =~ /-tpzed-000000000000000/).nil? and (auth.user.uuid =~ /-tpzed-anonymouspublic/).nil?
         CurrentApiClientHelper.act_as_system_user do
           auth.update_attributes!(expires_at: exp_date)
         end
@@ -58,7 +58,7 @@ namespace :db do
         # skip this token
         next
       end
-      if not auth.user.nil? and (auth.user.uuid =~ /-tpzed-000000000000000/).nil?
+      if not auth.user.nil? and (auth.user.uuid =~ /-tpzed-000000000000000/).nil? and (auth.user.uuid =~ /-tpzed-anonymouspublic/).nil?
         user_ids.add(auth.user_id)
         token_count += 1
       end

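The two rake tasks are run from the API server application directory. An illustrative invocation (the install path and the www-data user are typical of a package install and are assumptions here, not part of this commit):

    cd /var/www/arvados-api/current
    sudo -u www-data RAILS_ENV=production bundle exec rake db:check_long_lived_tokens
    sudo -u www-data RAILS_ENV=production bundle exec rake db:fix_long_lived_tokens
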
commit 1e008042ac7a5b7dfe4a11a8f33f71c57ee2666a
Author: Ward Vandewege <ward at curii.com>
Date:   Fri Oct 22 12:47:17 2021 -0400

    18289: add support to set `mksquashfs mem` for Singularity in our
           compute node image builder for cloud setups.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/tools/compute-images/arvados-images-aws.json b/tools/compute-images/arvados-images-aws.json
index 4d757abfd..b1b4c909d 100644
--- a/tools/compute-images/arvados-images-aws.json
+++ b/tools/compute-images/arvados-images-aws.json
@@ -8,6 +8,7 @@
     "aws_source_ami": "ami-04d70e069399af2e9",
     "build_environment": "aws",
     "public_key_file": "",
+    "mksquashfs_mem": "",
     "reposuffix": "",
     "resolver": "",
     "ssh_user": "admin",
@@ -76,6 +77,6 @@
     "type": "shell",
     "execute_command": "sudo -S env {{ .Vars }} /bin/bash '{{ .Path }}'",
     "script": "scripts/base.sh",
-    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}"]
+    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}","MKSQUASHFS_MEM={{user `mksquashfs_mem`}}"]
   }]
 }
diff --git a/tools/compute-images/arvados-images-azure.json b/tools/compute-images/arvados-images-azure.json
index ec1d9b6a6..20f776d04 100644
--- a/tools/compute-images/arvados-images-azure.json
+++ b/tools/compute-images/arvados-images-azure.json
@@ -10,6 +10,7 @@
     "location": "centralus",
     "project_id": "",
     "public_key_file": "",
+    "mksquashfs_mem": "",
     "reposuffix": "",
     "resolver": "",
     "resource_group": null,
@@ -65,6 +66,6 @@
     "type": "shell",
     "execute_command": "sudo -S env {{ .Vars }} /bin/bash '{{ .Path }}'",
     "script": "scripts/base.sh",
-    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}"]
+    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}","MKSQUASHFS_MEM={{user `mksquashfs_mem`}}"]
   }]
 }
diff --git a/tools/compute-images/build.sh b/tools/compute-images/build.sh
index a2dd2ed28..a714bafc1 100755
--- a/tools/compute-images/build.sh
+++ b/tools/compute-images/build.sh
@@ -55,6 +55,8 @@ Options:
       Set this to "-dev" to track the unstable/dev Arvados repositories
   --public-key-file (required)
       Path to the public key file that a-d-c will use to log into the compute node
+  --mksquashfs-mem (default: 512M)
+      Only relevant when using Singularity. This is the amount of memory mksquashfs is allowed to use.
   --debug
       Output debug information (default: false)
 
@@ -78,9 +80,10 @@ DEBUG=
 SSH_USER=
 AWS_DEFAULT_REGION=us-east-1
 PUBLIC_KEY_FILE=
+MKSQUASHFS_MEM=512M
 
 PARSEDOPTS=$(getopt --name "$0" --longoptions \
-    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,resolver:,reposuffix:,public-key-file:,debug \
+    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,resolver:,reposuffix:,public-key-file:,mksquashfs-mem:,debug \
     -- "" "$@")
 if [ $? -ne 0 ]; then
     exit 1
@@ -154,6 +157,9 @@ while [ $# -gt 0 ]; do
         --public-key-file)
             PUBLIC_KEY_FILE="$2"; shift
             ;;
+        --mksquashfs-mem)
+            MKSQUASHFS_MEM="$2"; shift
+            ;;
         --debug)
             # If you want to debug a build issue, add the -debug flag to the build
             # command in question.
@@ -256,6 +262,10 @@ fi
 if [[ "$PUBLIC_KEY_FILE" != "" ]]; then
   EXTRA2+=" -var public_key_file=$PUBLIC_KEY_FILE"
 fi
+if [[ "$MKSQUASHFS_MEM" != "" ]]; then
+  EXTRA2+=" -var mksquashfs_mem=$MKSQUASHFS_MEM"
+fi
+
 
 echo
 packer version
diff --git a/tools/compute-images/scripts/base.sh b/tools/compute-images/scripts/base.sh
index 022f4a7e5..0ab51223b 100644
--- a/tools/compute-images/scripts/base.sh
+++ b/tools/compute-images/scripts/base.sh
@@ -89,6 +89,11 @@ make -C ./builddir
 make -C ./builddir install
 ln -sf /var/lib/arvados/bin/* /usr/local/bin/
 
+# set `mksquashfs mem` in the singularity config file if it is configured
+if [ "$MKSQUASHFS_MEM" != "" ]; then
+  echo "mksquashfs mem = ${MKSQUASHFS_MEM}" >> /var/lib/arvados/etc/singularity/singularity.conf
+fi
+
 # Print singularity version installed
 singularity --version
 

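An illustrative build invocation exercising the new option; the cluster id, AWS profile and key path are placeholders, and any other required options follow the existing build script documentation:

    ./build.sh --json-file arvados-images-aws.json \
               --arvados-cluster-id zzzzz \
               --aws-profile default \
               --public-key-file ~/.ssh/arvados-compute.pub \
               --mksquashfs-mem 512M

The value propagates through the packer variable mksquashfs_mem into MKSQUASHFS_MEM in scripts/base.sh, which appends the corresponding "mksquashfs mem" line to the image's singularity.conf.
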
commit bcb56b17389d162a53546c5efaf288ba446b7f84
Author: Ward Vandewege <ward at curii.com>
Date:   Fri Oct 22 12:46:45 2021 -0400

    18289: Address review comments (documentation).
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/_config.yml b/doc/_config.yml
index 8cc4c398e..31db9c41d 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -261,7 +261,6 @@ navbar:
     - Containers API (LSF):
       - install/crunch2-lsf/install-dispatch.html.textile.liquid
     - Additional configuration:
-      - install/singularity.html.textile.liquid
       - install/container-shell-access.html.textile.liquid
     - External dependencies:
       - install/install-postgresql.html.textile.liquid
diff --git a/doc/_includes/_singularity_mksquashfs_configuration.liquid b/doc/_includes/_singularity_mksquashfs_configuration.liquid
new file mode 100644
index 000000000..dc0c394ba
--- /dev/null
+++ b/doc/_includes/_singularity_mksquashfs_configuration.liquid
@@ -0,0 +1,15 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2(#singularity_mksquashfs_configuration). Singularity mksquashfs configuration
+
+{% if show_docker_warning != nil %}
+{% include 'notebox_begin_warning' %}
+This section is only relevant when using Singularity. Skip this section when using Docker.
+{% include 'notebox_end' %}
+{% endif %}
+
+Docker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 512M@. The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory.
diff --git a/doc/architecture/singularity.html.textile.liquid b/doc/architecture/singularity.html.textile.liquid
index a94af598b..9a82cd93d 100644
--- a/doc/architecture/singularity.html.textile.liquid
+++ b/doc/architecture/singularity.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. See the "Singularity page in the installation guide":{{ site.baseurl }}/install/singularity.html for configuration details.
+Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. For on premises Slurm/LSF clusters, see the "Set up a compute node with Singularity":{{ site.baseurl }}/install/crunch2/install-compute-node-singularity.html page. For cloud compute clusters, see the "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html page.
 
 h2. Design overview
 
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index 5ea72f5e7..7c922e28d 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -16,6 +16,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 # "Introduction":#introduction
 # "Create an SSH keypair":#sshkeypair
 # "The build script":#building
+# "Singularity mksquashfs configuration":#singularity_mksquashfs_configuration
 # "Build an AWS image":#aws
 # "Build an Azure image":#azure
 
@@ -54,6 +55,12 @@ foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=
 </code></pre>
 </notextile>
 
+{% assign show_docker_warning = true %}
+
+{% include 'singularity_mksquashfs_configuration' %}
+
+The desired amount of memory to make available for @mksquashfs@ can be configured in an argument to the build script, see the next section. It defaults to @512M@.
+
 h2(#building). The build script
 
 The necessary files are located in the @arvados/tools/compute-images@ directory in the source tree. A build script is provided to generate the image. The @--help@ argument lists all available options:
@@ -97,15 +104,15 @@ Options:
   --azure-sku (default: unset, required if building for Azure, e.g. 16.04-LTS)
       Azure SKU image to use
   --ssh_user  (default: packer)
-      The user packer will use lo log into the image
-  --domain  (default: arvadosapi.com)
-      The domain part of the FQDN for the cluster
-  --resolver (default: 8.8.8.8)
+      The user packer will use to log into the image
+  --resolver (default: host's network provided)
       The dns resolver for the machine
   --reposuffix (default: unset)
       Set this to "-dev" to track the unstable/dev Arvados repositories
   --public-key-file (required)
       Path to the public key file that a-d-c will use to log into the compute node
+  --mksquashfs-mem (default: 512M)
+      Only relevant when using Singularity. This is the amount of memory mksquashfs is allowed to use.
   --debug
       Output debug information (default: false)
 </code></pre></notextile>
diff --git a/doc/install/crunch2-slurm/install-test.html.textile.liquid b/doc/install/crunch2-slurm/install-test.html.textile.liquid
index 786a71d3e..dc13c3c0f 100644
--- a/doc/install/crunch2-slurm/install-test.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-test.html.textile.liquid
@@ -26,7 +26,7 @@ If it works, this command should print @OK@ (it may also show some status messag
 
 h2. Test the dispatcher
 
-Make sure all of your compute nodes are set up with "Docker":../crunch2/install-compute-node-singularity.html or "Singularity":../crunch2/install-compute-node-docker.html.
+Make sure all of your compute nodes are set up with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.
 
 On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
 
diff --git a/doc/install/crunch2/install-compute-node-docker.html.textile.liquid b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
index 7e8f1dea7..876bb6ae5 100644
--- a/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
@@ -25,7 +25,7 @@ These instructions apply when Containers.RuntimeEngine is set to @docker@, refer
 
 h2(#introduction). Introduction
 
-This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados, with Slurm on a static cluster. These steps must be performed on every compute node.
+This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.
 
 h2(#docker). Set up Docker
 
diff --git a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
index 52b2612a5..09a3b4e3a 100644
--- a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
@@ -14,22 +14,43 @@ This page describes the requirements for a compute node in a Slurm or LSF cluste
 {% include 'notebox_end' %}
 
 {% include 'notebox_begin_warning' %}
-These instructions apply when Containers.RuntimeEngine is set to @singularity@, refer to "Set up a Slurm compute node with Docker":install-compute-node-docker.html when running @docker@.
+These instructions apply when Containers.RuntimeEngine is set to @singularity@, refer to "Set up a compute node with Docker":install-compute-node-docker.html when running @docker@.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
+# "Install python-arvados-fuse and crunch-run and squashfs-tools":#install-packages
 # "Set up Singularity":#singularity
-# "Update fuse.conf":#fuse
-# "Install'python-arvados-fuse and crunch-run":#install-packages
+# "Singularity mksquashfs configuration":#singularity_mksquashfs_configuration
 
 h2(#introduction). Introduction
 
-This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados, with Slurm on a static cluster. These steps must be performed on every compute node.
+Please refer to the "Singularity":{{site.baseurl}}/architecture/singularity.html documentation in the Architecture section.
+
+This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.
+
+{% assign arvados_component = 'python-arvados-fuse crunch-run squashfs-tools' %}
+
+{% include 'install_packages' %}
 
 h2(#singularity). Set up Singularity
 
-See "Singularity container runtime":../singularity.html
+Follow the "Singularity installation instructions":https://sylabs.io/guides/3.7/user-guide/quick_start.html. Make sure @singularity@ and @mksquashfs@ are working:
 
-{% assign arvados_component = 'python-arvados-fuse crunch-run' %}
+<notextile>
+<pre><code>$ <span class="userinput">singularity version</span>
+3.7.4
+$ <span class="userinput">mksquashfs -version</span>
+mksquashfs version 4.3-git (2014/06/09)
+[...]
+</code></pre>
+</notextile>
 
-{% include 'install_packages' %}
+Then update @Containers.RuntimeEngine@ in your cluster configuration:
+
+<notextile>
+<pre><code>      # Container runtime: "docker" (default) or "singularity"
+      RuntimeEngine: singularity
+</code></pre>
+</notextile>
+
+{% include 'singularity_mksquashfs_configuration' %}
diff --git a/doc/install/index.html.textile.liquid b/doc/install/index.html.textile.liquid
index 1b27ca6ed..2bd9710f7 100644
--- a/doc/install/index.html.textile.liquid
+++ b/doc/install/index.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin' %}
-This section is about installing an Arvados cluster.  If you are just looking to install Arvados client tools and libraries, "go to the SDK section.":{{site.baseurl}}/sdk
+This section is about installing an Arvados cluster.  If you are just looking to install Arvados client tools and libraries, "go to the SDK section.":{{site.baseurl}}/sdk/
 {% include 'notebox_end' %}
 
 Arvados components run on GNU/Linux systems, and supports AWS, GCP and Azure cloud platforms as well as on-premises installs.  Arvados supports Debian and derivatives such as Ubuntu, as well as Red Hat and derivatives such as CentOS.  "Arvados is Free Software":{{site.baseurl}}/user/copying/copying.html and self-install installations are not limited in any way.  Commercial support and development are also available from "Curii Corporation.":mailto:info@curii.com
diff --git a/doc/install/singularity.html.textile.liquid b/doc/install/singularity.html.textile.liquid
deleted file mode 100644
index dfe12f314..000000000
--- a/doc/install/singularity.html.textile.liquid
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Singularity container runtime
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-h2(#overview). Overview
-
-Please refer to the "Singularity":{{site.baseurl}}/architecture/singularity.html documentation in the Architecture section.
-
-h2(#configuration). Configuration
-
-To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.7/user-guide/quick_start.html on your cloud worker image or Slurm/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
-
-<notextile>
-<pre><code>$ <span class="userinput">singularity version</span>
-3.7.4
-$ <span class="userinput">mksquashfs -version</span>
-mksquashfs version 4.3-git (2014/06/09)
-[...]
-</code></pre>
-</notextile>
-
-Then update @Containers.RuntimeEngine@ in your cluster configuration:
-
-<notextile>
-<pre><code>      # Container runtime: "docker" (default) or "singularity"
-      RuntimeEngine: singularity
-</code></pre>
-</notextile>
-
-Restart your dispatcher (@crunch-dispatch-slurm@, @arvados-dispatch-cloud@, or @arvados-dispatch-lsf@) after updating your configuration file.
-
-h2(#singularity_configuration). Singularity configuration
-
-Docker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 512M at . The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory.
diff --git a/doc/user/topics/arv-docker.html.textile.liquid b/doc/user/topics/arv-docker.html.textile.liquid
index 8a97df6e1..391b4e779 100644
--- a/doc/user/topics/arv-docker.html.textile.liquid
+++ b/doc/user/topics/arv-docker.html.textile.liquid
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 This page describes how to set up the runtime environment (e.g., the programs, libraries, and other dependencies needed to run a job) that a workflow step will be run in using "Docker":https://www.docker.com/ or "Singularity":https://sylabs.io/singularity/.  Docker and Singularity are tools for building and running containers that isolate applications from other applications running on the same node.  For detailed information, see the "Docker User Guide":https://docs.docker.com/userguide/ and the "Introduction to Singularity":https://sylabs.io/guides/3.5/user-guide/introduction.html.
 
-Note that Arvados always works with Docker images, even when it is configured to use Singularity to run containers. There are some differences between the two runtimes that can affect your containers. See the "Singularity container runtime":{{site.baseurl}}/install/singularity.html page for details.
+Note that Arvados always works with Docker images, even when it is configured to use Singularity to run containers. There are some differences between the two runtimes that can affect your containers. See the "Singularity architecture":{{site.baseurl}}/architecture/singularity.html page for details.
 
 This page describes:
 

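After switching Containers.RuntimeEngine, the relevant dispatcher has to be restarted to pick up the change. A sketch, assuming the systemd unit names installed by the standard Arvados packages:

    sudo systemctl restart crunch-dispatch-slurm    # or arvados-dispatch-lsf / arvados-dispatch-cloud
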
commit 547fae4ba065b99e2a2832f441b745dc7cd59889
Author: Ward Vandewege <ward at curii.com>
Date:   Fri Oct 22 10:29:16 2021 -0400

    18289: Documentation changes to reflect we support both Slurm and LSF.
           Refactor the compute node installation instructions. Fix a few
           linkchecker warnings.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/_config.yml b/doc/_config.yml
index 6f1c90d9b..8cc4c398e 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -246,16 +246,19 @@ navbar:
       - install/install-shell-server.html.textile.liquid
       - install/install-webshell.html.textile.liquid
       - install/install-arv-git-httpd.html.textile.liquid
-    - Containers API (cloud):
+    - Containers API (all):
       - install/install-jobs-image.html.textile.liquid
+    - Containers API (cloud):
       - install/crunch2-cloud/install-compute-node.html.textile.liquid
       - install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
-    - Containers API (slurm):
+    - Compute nodes (Slurm or LSF):
+      - install/crunch2/install-compute-node-docker.html.textile.liquid
+      - install/crunch2/install-compute-node-singularity.html.textile.liquid
+    - Containers API (Slurm):
       - install/crunch2-slurm/install-dispatch.html.textile.liquid
       - install/crunch2-slurm/configure-slurm.html.textile.liquid
-      - install/crunch2-slurm/install-compute-node.html.textile.liquid
       - install/crunch2-slurm/install-test.html.textile.liquid
-    - Containers API (lsf):
+    - Containers API (LSF):
       - install/crunch2-lsf/install-dispatch.html.textile.liquid
     - Additional configuration:
       - install/singularity.html.textile.liquid
diff --git a/doc/_includes/_container_scheduling_parameters.liquid b/doc/_includes/_container_scheduling_parameters.liquid
index abbe6f4c0..be046173a 100644
--- a/doc/_includes/_container_scheduling_parameters.liquid
+++ b/doc/_includes/_container_scheduling_parameters.liquid
@@ -6,7 +6,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2. Scheduling parameters
 
-Parameters to be passed to the container scheduler (e.g., SLURM) when running a container.
+Parameters to be passed to the container scheduler (e.g., Slurm) when running a container.
 
 table(table table-bordered table-condensed).
 |_. Key|_. Type|_. Description|_. Notes|
diff --git a/doc/_includes/_install_compute_fuse.liquid b/doc/_includes/_install_compute_fuse.liquid
index 40a7865ba..95679f3fa 100644
--- a/doc/_includes/_install_compute_fuse.liquid
+++ b/doc/_includes/_install_compute_fuse.liquid
@@ -6,10 +6,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#fuse). Update fuse.conf
 
-{% include 'notebox_begin_warning' %}
-This is only needed when Containers.RuntimeEngine is set to @docker@, skip this section when running @singularity@.
-{% include 'notebox_end' %}
-
 FUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers.  Install this file as @/etc/fuse.conf@:
 
 <notextile>
diff --git a/doc/_includes/_install_docker_cleaner.liquid b/doc/_includes/_install_docker_cleaner.liquid
index 1606a04e1..f8e9e049d 100644
--- a/doc/_includes/_install_docker_cleaner.liquid
+++ b/doc/_includes/_install_docker_cleaner.liquid
@@ -6,10 +6,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#docker-cleaner). Update docker-cleaner.json
 
-{% include 'notebox_begin_warning' %}
-This is only needed when Containers.RuntimeEngine is set to @docker@, skip this section when running @singularity@.
-{% include 'notebox_end' %}
-
 The @arvados-docker-cleaner@ program removes least recently used Docker images as needed to keep disk usage below a configured limit.
 
 Create a file @/etc/arvados/docker-cleaner/docker-cleaner.json@ in an editor, with the following contents.
diff --git a/doc/api/execution.html.textile.liquid b/doc/api/execution.html.textile.liquid
index 74942ba55..1d41acb01 100644
--- a/doc/api/execution.html.textile.liquid
+++ b/doc/api/execution.html.textile.liquid
@@ -15,7 +15,7 @@ h2. Container API
 
 # To submit work, create a "container request":{{site.baseurl}}/api/methods/container_requests.html in the @Committed@ state.
 # The system will fufill the container request by creating or reusing a "Container object":{{site.baseurl}}/api/methods/containers.html and assigning it to the @container_uuid@ field.  If the same request has been submitted in the past, it may reuse an existing container.  The reuse behavior can be suppressed with @use_existing: false@ in the container request.
-# The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as SLURM).
+# The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as Slurm).
 # The container executes.  Upon termination the container goes into the  @Complete@ state.  If the container execution was interrupted or lost due to system failure, it will go into the @Cancelled@ state.
 # When the container associated with the container request is completed, the container request will go into the @Final@ state.
 # The @output_uuid@ field of the container request contains the uuid of output collection produced by container request.
diff --git a/doc/api/methods/container_requests.html.textile.liquid b/doc/api/methods/container_requests.html.textile.liquid
index 0aa96c3c3..870470110 100644
--- a/doc/api/methods/container_requests.html.textile.liquid
+++ b/doc/api/methods/container_requests.html.textile.liquid
@@ -72,7 +72,7 @@ Priority 1 is the lowest priority.
 
 Priority 1000 is the highest priority.
 
-The actual order that containers execute is determined by the underlying scheduling software (e.g. SLURM) and may be based on a combination of container priority, submission time, available resources, and other factors.
+The actual order that containers execute is determined by the underlying scheduling software (e.g. Slurm) and may be based on a combination of container priority, submission time, available resources, and other factors.
 
 In the current implementation, the magnitude of difference in priority between two containers affects the weight of priority vs age in determining scheduling order.  If two containers have only a small difference in priority (for example, 500 and 501) and the lower priority container has a longer queue time, the lower priority container may be scheduled before the higher priority container.  Use a greater magnitude difference (for example, 500 and 600) to give higher weight to priority over queue time.
 
diff --git a/doc/architecture/singularity.html.textile.liquid b/doc/architecture/singularity.html.textile.liquid
index dc10d28d3..a94af598b 100644
--- a/doc/architecture/singularity.html.textile.liquid
+++ b/doc/architecture/singularity.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a SLURM/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. See the "Singularity page in the installation guide":{{ site.baseurl }}/install/singularity.html for configuration details.
+Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. See the "Singularity page in the installation guide":{{ site.baseurl }}/install/singularity.html for configuration details.
 
 h2. Design overview
 
@@ -30,6 +30,6 @@ h2. Limitations
 Arvados @Singularity@ support is a work in progress. These are the current limitations of the implementation:
 
 * Even when using the Singularity runtime, users' container images are expected to be saved in Docker format. Specifying a @.sif@ file as an image when submitting a container request is not yet supported.
-* Arvados' Singularity implementation does not yet limit the amount of memory available in a container. Each container will have access to all memory on the host where it runs, unless memory use is restricted by SLURM/LSF.
+* Arvados' Singularity implementation does not yet limit the amount of memory available in a container. Each container will have access to all memory on the host where it runs, unless memory use is restricted by Slurm/LSF.
 * The Docker ENTRYPOINT instruction is ignored.
 * Arvados is tested with Singularity version 3.7.4. Other versions may not work.
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index 0638ce70e..5ea72f5e7 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-arvados-dispatch-cloud is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm.
+@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
diff --git a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
index 347804662..b4987f443 100644
--- a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-arvados-dispatch-cloud is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm.
+@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
diff --git a/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid b/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
index c38e6f205..7e44c8ec4 100644
--- a/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-arvados-dispatch-lsf is only relevant for on premises clusters that will spool jobs to LSF. Skip this section if you are installing a cloud cluster.
+@arvados-dispatch-lsf@ is only relevant for on premises clusters that will spool jobs to LSF. Skip this section if you use Slurm or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
 h2(#overview). Overview
@@ -19,7 +19,7 @@ Containers can be dispatched to an LSF cluster.  The dispatcher sends work to th
 
 In order to run containers, you must choose a user that has permission to set up FUSE mounts and run Singularity/Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.
 
-Set up all of your compute nodes "as you would for a SLURM cluster":../crunch2-slurm/install-compute-node.html.
+Set up all of your compute nodes with "Docker":../crunch2/install-compute-node-singularity.html or "Singularity":../crunch2/install-compute-node-docker.html.
 
 *Current limitations*:
 * Arvados container priority is not propagated to LSF job priority. This can cause inefficient use of compute resources, and even deadlock if there are fewer compute nodes than concurrent Arvados workflows.
diff --git a/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid b/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid
index eda330798..b6f1ac808 100644
--- a/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid
+++ b/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid
@@ -10,10 +10,10 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
-Containers can be dispatched to a Slurm cluster.  The dispatcher sends work to the cluster using Slurm's @sbatch@ command, so it works in a variety of SLURM configurations.
+Containers can be dispatched to a Slurm cluster.  The dispatcher sends work to the cluster using Slurm's @sbatch@ command, so it works in a variety of Slurm configurations.
 
 In order to run containers, you must run the dispatcher as a user that has permission to set up FUSE mounts and run Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.
 
@@ -76,7 +76,7 @@ h3. Slurm configuration essentials
 
 Whenever you change this file, you will need to update the copy _on every compute node_ as well as the controller node, and then run @sudo scontrol reconfigure@.
 
-*@ControlMachine@* should be a DNS name that resolves to the Slurm controller (dispatch/API server). This must resolve correctly on all Slurm worker nodes as well as the controller itself. In general SLURM is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.
+*@ControlMachine@* should be a DNS name that resolves to the Slurm controller (dispatch/API server). This must resolve correctly on all Slurm worker nodes as well as the controller itself. In general Slurm is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.
 
 *@SelectType=select/linear@* is needed on cloud-based installations that update node sizes dynamically, but it can only schedule one container at a time on each node. On a static or homogeneous cluster, use @SelectType=select/cons_res@ with @SelectTypeParameters=CR_CPU_Memory@ instead to enable node sharing.
 
@@ -103,7 +103,7 @@ If you want Arvados to assign names to your nodes with a different consecutive n
 * In @application.yml@: <code>assign_node_hostname: worker1-%<slot_number>04d</code>
 * In @slurm.conf@: <code>NodeName=worker1-[0000-0255]</code>
 
-If your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script (see "Installing a compute node":install-compute-node.html) send its current hostname, rather than expect Arvados to assign one.
+If your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script send its current hostname, rather than expect Arvados to assign one.
 * In @application.yml@: <code>assign_node_hostname: false</code>
 * In @slurm.conf@: <code>NodeName=alice,bob,clay,darlene</code>
 
diff --git a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
index 5b5b868e5..52553a35e 100644
--- a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
@@ -22,7 +22,7 @@ crunch-dispatch-slurm is only relevant for on premises clusters that will spool
 
 h2(#introduction). Introduction
 
-This assumes you already have a Slurm cluster, and have "set up all of your compute nodes":install-compute-node.html.  Slurm packages are available for CentOS, Debian and Ubuntu. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
+This assumes you already have a Slurm cluster, and have set up all of your compute nodes with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.  Slurm packages are available for CentOS, Debian and Ubuntu. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
 
 The Arvados Slurm dispatcher can run on any node that can submit requests to both the Arvados API server and the Slurm controller (via @sbatch@).  It is not resource-intensive, so you can run it on the API server node.
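As a rough sanity check that a candidate dispatch node meets both requirements (the hostname below is a placeholder for your API server):

    sbatch --version      # Slurm submission tools installed and on PATH
    sinfo                 # Slurm controller answers
    curl -fsS https://zzzzz.example.com/discovery/v1/apis/arvados/v1/rest >/dev/null \
      && echo "Arvados API server reachable"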
 
@@ -79,7 +79,7 @@ Some Arvados installations run a local keepstore on each compute node to handle
 h3(#PrioritySpread). Containers.Slurm.PrioritySpread
 
 crunch-dispatch-slurm adjusts the "nice" values of its Slurm jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.
-* If non-Arvados jobs run on your Slurm cluster, and your Arvados containers are waiting too long in the Slurm queue because their "nice" values are too high for them to compete with other SLURM jobs, you should use a smaller PrioritySpread value.
+* If non-Arvados jobs run on your Slurm cluster, and your Arvados containers are waiting too long in the Slurm queue because their "nice" values are too high for them to compete with other Slurm jobs, you should use a smaller PrioritySpread value.
 * If you have an older Slurm system that limits nice values to 10000, a smaller @PrioritySpread@ can help avoid reaching that limit.
 * In other cases, a larger value is beneficial because it reduces the total number of adjustments made by executing @scontrol@.
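A sketch of where this option lives in @config.yml@ (the value 1000 is only illustrative, and the cluster ID @zzzzz@ is a placeholder; check the exact section name against your @config.default.yml@):

    Clusters:
      zzzzz:
        Containers:
          SLURM:
            PrioritySpread: 1000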
 
@@ -122,7 +122,7 @@ The choice of subsystem ("memory" in this example) must correspond to one of the
 
 Some versions of Docker (at least 1.9), when run under systemd, require the cgroup parent to be specified as a systemd slice.  This causes an error when specifying a cgroup parent created outside systemd, such as those created by Slurm.
 
-You can work around this issue by disabling the Docker daemon's systemd integration.  This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use Slurm's cgroups as container parents.  To do this, "configure the Docker daemon on all compute nodes":install-compute-node.html#configure_docker_daemon to run with the option @--exec-opt native.cgroupdriver=cgroupfs at .
+You can work around this issue by disabling the Docker daemon's systemd integration.  This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use Slurm's cgroups as container parents.  To do this, configure the Docker daemon on all compute nodes to run with the option @--exec-opt native.cgroupdriver=cgroupfs@.
 
 {% include 'notebox_end' %}
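One way to apply this workaround is through the Docker daemon configuration file, assuming your distribution reads @/etc/docker/daemon.json@; restart the daemon afterwards (for example with @systemctl restart docker@):

    {
      "exec-opts": ["native.cgroupdriver=cgroupfs"]
    }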
 
diff --git a/doc/install/crunch2-slurm/install-test.html.textile.liquid b/doc/install/crunch2-slurm/install-test.html.textile.liquid
index 647995a8c..786a71d3e 100644
--- a/doc/install/crunch2-slurm/install-test.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-test.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
 h2. Test compute node setup
@@ -26,6 +26,8 @@ If it works, this command should print @OK@ (it may also show some status messag
 
 h2. Test the dispatcher
 
+Make sure all of your compute nodes are set up with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.
+
 On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
 
 <notextile>
diff --git a/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
similarity index 63%
rename from doc/install/crunch2-slurm/install-compute-node.html.textile.liquid
rename to doc/install/crunch2/install-compute-node-docker.html.textile.liquid
index 8c01c44ed..7e8f1dea7 100644
--- a/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: installguide
-title: Set up a Slurm compute node
+title: Set up a compute node with Docker
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -10,16 +10,17 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+{% include 'notebox_end' %}
+
+{% include 'notebox_begin_warning' %}
+These instructions apply when Containers.RuntimeEngine is set to @docker@. Refer to "Set up a compute node with Singularity":install-compute-node-singularity.html when running @singularity@.
 {% include 'notebox_end' %}
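The setting referred to in these notes is the cluster-wide container runtime selection; a minimal @config.yml@ sketch (the cluster ID @zzzzz@ is a placeholder):

    Clusters:
      zzzzz:
        Containers:
          RuntimeEngine: docker    # or "singularity"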
 
 # "Introduction":#introduction
 # "Set up Docker":#docker
 # "Update fuse.conf":#fuse
 # "Update docker-cleaner.json":#docker-cleaner
-# "Configure Linux cgroups accounting":#cgroups
-# "Install Docker":#install_docker
-# "Configure the Docker daemon":#configure_docker_daemon
 # "Install'python-arvados-fuse and crunch-run and arvados-docker-cleaner":#install-packages
 
 h2(#introduction). Introduction
diff --git a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
new file mode 100644
index 000000000..52b2612a5
--- /dev/null
+++ b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
@@ -0,0 +1,35 @@
+---
+layout: default
+navsection: installguide
+title: Set up a compute node with Singularity
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin_warning' %}
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+{% include 'notebox_end' %}
+
+{% include 'notebox_begin_warning' %}
+These instructions apply when Containers.RuntimeEngine is set to @singularity@. Refer to "Set up a compute node with Docker":install-compute-node-docker.html when running @docker@.
+{% include 'notebox_end' %}
+
+# "Introduction":#introduction
+# "Set up Singularity":#singularity
+# "Update fuse.conf":#fuse
+# "Install'python-arvados-fuse and crunch-run":#install-packages
+
+h2(#introduction). Introduction
+
+This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados, using Slurm or LSF on a static cluster. These steps must be performed on every compute node.
+
+h2(#singularity). Set up Singularity
+
+See "Singularity container runtime":../singularity.html
+
+{% assign arvados_component = 'python-arvados-fuse crunch-run' %}
+
+{% include 'install_packages' %}
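After following the Singularity container runtime page, a quick hedged check on each node that the runtime and @squashfs-tools@ are present:

    singularity version
    mksquashfs -version | head -1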
diff --git a/doc/install/singularity.html.textile.liquid b/doc/install/singularity.html.textile.liquid
index b2a39790d..dfe12f314 100644
--- a/doc/install/singularity.html.textile.liquid
+++ b/doc/install/singularity.html.textile.liquid
@@ -15,7 +15,7 @@ Please refer to the "Singularity":{{site.baseurl}}/architecture/singularity.html
 
 h2(#configuration). Configuration
 
-To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.7/user-guide/quick_start.html on your cloud worker image or SLURM/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
+To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.7/user-guide/quick_start.html on your cloud worker image or Slurm/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
 
 <notextile>
 <pre><code>$ <span class="userinput">singularity version</span>
diff --git a/doc/sdk/index.html.textile.liquid b/doc/sdk/index.html.textile.liquid
index b6ed39ed2..844f1ff71 100644
--- a/doc/sdk/index.html.textile.liquid
+++ b/doc/sdk/index.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This section documents language bindings for the "Arvados API":{{site.baseurl}}/api and Keep that are available for various programming languages.  Not all features are available in every SDK.  The most complete SDK is the Python SDK.  Note that this section only gives a high level overview of each SDK.  Consult the "Arvados API":{{site.baseurl}}/api section for detailed documentation about Arvados API calls available on each resource.
+This section documents language bindings for the "Arvados API":{{site.baseurl}}/api/index.html and Keep that are available for various programming languages.  Not all features are available in every SDK.  The most complete SDK is the Python SDK.  Note that this section only gives a high level overview of each SDK.  Consult the "Arvados API":{{site.baseurl}}/api/index.html section for detailed documentation about Arvados API calls available on each resource.
 
 * "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html (also includes essential command line tools such as "arv-put" and "arv-get")
 * "Command line SDK":{{site.baseurl}}/sdk/cli/install.html ("arv")

commit a66c119e7198d95969fbcbcde48a05c92dbc07ac
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 21 19:33:48 2021 -0400

    18289: documentation: singularity refinements.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/doc/_includes/_install_compute_fuse.liquid b/doc/_includes/_install_compute_fuse.liquid
index 95679f3fa..40a7865ba 100644
--- a/doc/_includes/_install_compute_fuse.liquid
+++ b/doc/_includes/_install_compute_fuse.liquid
@@ -6,6 +6,10 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#fuse). Update fuse.conf
 
+{% include 'notebox_begin_warning' %}
+This is only needed when Containers.RuntimeEngine is set to @docker@; skip this section when running @singularity@.
+{% include 'notebox_end' %}
+
 FUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers.  Install this file as @/etc/fuse.conf@:
 
 <notextile>
diff --git a/doc/_includes/_install_docker_cleaner.liquid b/doc/_includes/_install_docker_cleaner.liquid
index f8e9e049d..1606a04e1 100644
--- a/doc/_includes/_install_docker_cleaner.liquid
+++ b/doc/_includes/_install_docker_cleaner.liquid
@@ -6,6 +6,10 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#docker-cleaner). Update docker-cleaner.json
 
+{% include 'notebox_begin_warning' %}
+This is only needed when Containers.RuntimeEngine is set to @docker@; skip this section when running @singularity@.
+{% include 'notebox_end' %}
+
 The @arvados-docker-cleaner@ program removes least recently used Docker images as needed to keep disk usage below a configured limit.
 
 Create a file @/etc/arvados/docker-cleaner/docker-cleaner.json@ in an editor, with the following contents.

commit 644d62c8bb6528713db56a8464ffaba94740e0e5
Author: Ward Vandewege <ward at curii.com>
Date:   Thu Oct 21 16:53:03 2021 -0400

    18289: only pass the --allow-other argument to arv-mount when the
           runtime is Docker.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>

diff --git a/lib/crunchrun/crunchrun.go b/lib/crunchrun/crunchrun.go
index 42f143f1c..3036d5555 100644
--- a/lib/crunchrun/crunchrun.go
+++ b/lib/crunchrun/crunchrun.go
@@ -402,11 +402,14 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
 	arvMountCmd := []string{
 		"arv-mount",
 		"--foreground",
-		"--allow-other",
 		"--read-write",
 		"--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
 		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
 
+	if runner.executor.Runtime() == "docker" {
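+		// The --allow-other option (and the matching user_allow_other setting
+		// in /etc/fuse.conf) is only needed when the container runtime is Docker.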
+		arvMountCmd = append(arvMountCmd, "--allow-other")
+	}
+
 	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
 		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
 	}
diff --git a/lib/crunchrun/crunchrun_test.go b/lib/crunchrun/crunchrun_test.go
index 1131982de..4c5f517b1 100644
--- a/lib/crunchrun/crunchrun_test.go
+++ b/lib/crunchrun/crunchrun_test.go
@@ -1124,7 +1124,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		cr.statInterval = 5 * time.Second
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}})
@@ -1144,7 +1144,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "foo,bar", "--crunchstat-interval=5",
 			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/out": {realTemp + "/tmp2", false}, "/tmp": {realTemp + "/tmp3", false}})
@@ -1164,7 +1164,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}, "/etc/arvados/ca-certificates.crt": {stubCertPath, true}})
@@ -1187,7 +1187,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/keeptmp": {realTemp + "/keep1/tmp0", false}})
@@ -1210,7 +1210,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{
@@ -1237,7 +1237,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{
@@ -1320,7 +1320,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{

commit ea8114022b55a158e1df2bfbdfa29d0703940708
Author: Tom Clegg <tom at curii.com>
Date:   Thu Oct 21 14:38:48 2021 -0400

    18286: Document requirement to add Services.DispatchLSF section.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid b/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
index 6fda50688..c38e6f205 100644
--- a/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
@@ -29,6 +29,15 @@ h2(#update-config). Update config.yml
 
 Arvados-dispatch-lsf reads the common configuration file at @/etc/arvados/config.yml@.
 
+Add a DispatchLSF entry to the Services section, using the hostname where @arvados-dispatch-lsf@ will run, and an available port:
+
+<notextile>
+<pre>    Services:
+      DispatchLSF:
+        InternalURLs:
+          "http://<code class="userinput">hostname.zzzzz.arvadosapi.com:9007</code>": {}</pre>
+</notextile>
+
 Review the following configuration parameters and adjust as needed.
 
 h3(#BsubSudoUser). Containers.LSF.BsubSudoUser
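As a sketch of what this parameter looks like in @config.yml@ (the @crunch@ account is an assumption, matching the dispatch user described in the overview above; @zzzzz@ is a placeholder cluster ID):

    Clusters:
      zzzzz:
        Containers:
          LSF:
            BsubSudoUser: crunch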

commit a598bba05ec26c8103bd7f8cf9e4dcf451469531
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date:   Mon Sep 20 15:26:12 2021 -0300

    Pin arvados 2.3.0 version in provision.sh. Refs #18130
    
    Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>

diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index a56a31876..cb75610a2 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -165,14 +165,14 @@ WORKBENCH2_EXT_SSL_PORT=3001
 # release.
 # The "local.params.example.*" files already set "RELEASE=production"
 # to deploy  production-ready packages
-RELEASE="development"
-VERSION="latest"
+RELEASE="production"
+VERSION="2.3.0-1"
 
 # These are arvados-formula-related parameters
 # An arvados-formula tag. For a stable release, this should be a
 # branch name (e.g. X.Y-dev) or tag for the release.
-# ARVADOS_TAG="2.2.0"
-# BRANCH="main"
+# ARVADOS_TAG="2.3.0"
+BRANCH="2.3-dev"
 
 # Other formula versions we depend on
 POSTGRES_TAG="v0.41.6"

-----------------------------------------------------------------------


hooks/post-receive