[ARVADOS] updated: 2.1.0-1503-g1e008042a

Git user git@public.arvados.org
Fri Oct 22 18:22:05 UTC 2021


Summary of changes:
 doc/_config.yml                                    | 12 +++--
 .../_container_scheduling_parameters.liquid        |  2 +-
 .../_singularity_mksquashfs_configuration.liquid   | 15 ++++++
 doc/api/execution.html.textile.liquid              |  2 +-
 .../methods/container_requests.html.textile.liquid |  2 +-
 doc/architecture/singularity.html.textile.liquid   |  4 +-
 .../install-compute-node.html.textile.liquid       | 17 +++++--
 .../install-dispatch-cloud.html.textile.liquid     |  2 +-
 .../install-dispatch.html.textile.liquid           |  4 +-
 .../configure-slurm.html.textile.liquid            |  8 ++--
 .../install-dispatch.html.textile.liquid           |  8 ++--
 .../crunch2-slurm/install-test.html.textile.liquid |  4 +-
 ...nstall-compute-node-docker.html.textile.liquid} | 13 ++---
 ...ll-compute-node-singularity.html.textile.liquid | 56 ++++++++++++++++++++++
 doc/install/index.html.textile.liquid              |  2 +-
 doc/install/singularity.html.textile.liquid        | 41 ----------------
 doc/sdk/index.html.textile.liquid                  |  2 +-
 doc/user/topics/arv-docker.html.textile.liquid     |  2 +-
 lib/crunchrun/crunchrun.go                         |  5 +-
 lib/crunchrun/crunchrun_test.go                    | 14 +++---
 tools/compute-images/arvados-images-aws.json       |  3 +-
 tools/compute-images/arvados-images-azure.json     |  3 +-
 tools/compute-images/build.sh                      | 12 ++++-
 tools/compute-images/scripts/base.sh               |  5 ++
 24 files changed, 150 insertions(+), 88 deletions(-)
 create mode 100644 doc/_includes/_singularity_mksquashfs_configuration.liquid
 rename doc/install/{crunch2-slurm/install-compute-node.html.textile.liquid => crunch2/install-compute-node-docker.html.textile.liquid} (56%)
 create mode 100644 doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
 delete mode 100644 doc/install/singularity.html.textile.liquid

       via  1e008042ac7a5b7dfe4a11a8f33f71c57ee2666a (commit)
       via  bcb56b17389d162a53546c5efaf288ba446b7f84 (commit)
       via  547fae4ba065b99e2a2832f441b745dc7cd59889 (commit)
       via  a66c119e7198d95969fbcbcde48a05c92dbc07ac (commit)
       via  644d62c8bb6528713db56a8464ffaba94740e0e5 (commit)
      from  ea8114022b55a158e1df2bfbdfa29d0703940708 (commit)

Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list them
in full below.


commit 1e008042ac7a5b7dfe4a11a8f33f71c57ee2666a
Author: Ward Vandewege <ward@curii.com>
Date:   Fri Oct 22 12:47:17 2021 -0400

    18289: add support to set `mksquashfs mem` for Singularity in our
           compute node image builder for cloud setups.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

diff --git a/tools/compute-images/arvados-images-aws.json b/tools/compute-images/arvados-images-aws.json
index 4d757abfd..b1b4c909d 100644
--- a/tools/compute-images/arvados-images-aws.json
+++ b/tools/compute-images/arvados-images-aws.json
@@ -8,6 +8,7 @@
     "aws_source_ami": "ami-04d70e069399af2e9",
     "build_environment": "aws",
     "public_key_file": "",
+    "mksquashfs_mem": "",
     "reposuffix": "",
     "resolver": "",
     "ssh_user": "admin",
@@ -76,6 +77,6 @@
     "type": "shell",
     "execute_command": "sudo -S env {{ .Vars }} /bin/bash '{{ .Path }}'",
     "script": "scripts/base.sh",
-    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}"]
+    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}","MKSQUASHFS_MEM={{user `mksquashfs_mem`}}"]
   }]
 }
diff --git a/tools/compute-images/arvados-images-azure.json b/tools/compute-images/arvados-images-azure.json
index ec1d9b6a6..20f776d04 100644
--- a/tools/compute-images/arvados-images-azure.json
+++ b/tools/compute-images/arvados-images-azure.json
@@ -10,6 +10,7 @@
     "location": "centralus",
     "project_id": "",
     "public_key_file": "",
+    "mksquashfs_mem": "",
     "reposuffix": "",
     "resolver": "",
     "resource_group": null,
@@ -65,6 +66,6 @@
     "type": "shell",
     "execute_command": "sudo -S env {{ .Vars }} /bin/bash '{{ .Path }}'",
     "script": "scripts/base.sh",
-    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}"]
+    "environment_vars": ["RESOLVER={{user `resolver`}}","REPOSUFFIX={{user `reposuffix`}}","MKSQUASHFS_MEM={{user `mksquashfs_mem`}}"]
   }]
 }
diff --git a/tools/compute-images/build.sh b/tools/compute-images/build.sh
index a2dd2ed28..a714bafc1 100755
--- a/tools/compute-images/build.sh
+++ b/tools/compute-images/build.sh
@@ -55,6 +55,8 @@ Options:
       Set this to "-dev" to track the unstable/dev Arvados repositories
   --public-key-file (required)
       Path to the public key file that a-d-c will use to log into the compute node
+  --mksquashfs-mem (default: 512M)
+      Only relevant when using Singularity. This is the amount of memory mksquashfs is allowed to use.
   --debug
       Output debug information (default: false)
 
@@ -78,9 +80,10 @@ DEBUG=
 SSH_USER=
 AWS_DEFAULT_REGION=us-east-1
 PUBLIC_KEY_FILE=
+MKSQUASHFS_MEM=512M
 
 PARSEDOPTS=$(getopt --name "$0" --longoptions \
-    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,resolver:,reposuffix:,public-key-file:,debug \
+    help,json-file:,arvados-cluster-id:,aws-source-ami:,aws-profile:,aws-secrets-file:,aws-region:,aws-vpc-id:,aws-subnet-id:,gcp-project-id:,gcp-account-file:,gcp-zone:,azure-secrets-file:,azure-resource-group:,azure-location:,azure-sku:,azure-cloud-environment:,ssh_user:,resolver:,reposuffix:,public-key-file:,mksquashfs-mem:,debug \
     -- "" "$@")
 if [ $? -ne 0 ]; then
     exit 1
@@ -154,6 +157,9 @@ while [ $# -gt 0 ]; do
         --public-key-file)
             PUBLIC_KEY_FILE="$2"; shift
             ;;
+        --mksquashfs-mem)
+            MKSQUASHFS_MEM="$2"; shift
+            ;;
         --debug)
             # If you want to debug a build issue, add the -debug flag to the build
             # command in question.
@@ -256,6 +262,10 @@ fi
 if [[ "$PUBLIC_KEY_FILE" != "" ]]; then
   EXTRA2+=" -var public_key_file=$PUBLIC_KEY_FILE"
 fi
+if [[ "$MKSQUASHFS_MEM" != "" ]]; then
+  EXTRA2+=" -var mksquashfs_mem=$MKSQUASHFS_MEM"
+fi
+
 
 echo
 packer version
diff --git a/tools/compute-images/scripts/base.sh b/tools/compute-images/scripts/base.sh
index 022f4a7e5..0ab51223b 100644
--- a/tools/compute-images/scripts/base.sh
+++ b/tools/compute-images/scripts/base.sh
@@ -89,6 +89,11 @@ make -C ./builddir
 make -C ./builddir install
 ln -sf /var/lib/arvados/bin/* /usr/local/bin/
 
+# set `mksquashfs mem` in the singularity config file if it is configured
+if [ "$MKSQUASHFS_MEM" != "" ]; then
+  echo "mksquashfs mem = ${MKSQUASHFS_MEM}" >> /var/lib/arvados/etc/singularity/singularity.conf
+fi
+
 # Print singularity version installed
 singularity --version
 

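For reference, a usage sketch of the new option (not part of the commit; the cluster ID, key path, and memory value below are hypothetical placeholders, while all flags are from build.sh's option list above):

$ ./build.sh --json-file arvados-images-aws.json \
    --arvados-cluster-id xxxxx \
    --aws-region us-east-1 \
    --public-key-file ~/.ssh/id_rsa.pub \
    --mksquashfs-mem 1G
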
commit bcb56b17389d162a53546c5efaf288ba446b7f84
Author: Ward Vandewege <ward@curii.com>
Date:   Fri Oct 22 12:46:45 2021 -0400

    18289: Address review comments (documentation).
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

diff --git a/doc/_config.yml b/doc/_config.yml
index 8cc4c398e..31db9c41d 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -261,7 +261,6 @@ navbar:
     - Containers API (LSF):
       - install/crunch2-lsf/install-dispatch.html.textile.liquid
     - Additional configuration:
-      - install/singularity.html.textile.liquid
       - install/container-shell-access.html.textile.liquid
     - External dependencies:
       - install/install-postgresql.html.textile.liquid
diff --git a/doc/_includes/_singularity_mksquashfs_configuration.liquid b/doc/_includes/_singularity_mksquashfs_configuration.liquid
new file mode 100644
index 000000000..dc0c394ba
--- /dev/null
+++ b/doc/_includes/_singularity_mksquashfs_configuration.liquid
@@ -0,0 +1,15 @@
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+h2(#singularity_mksquashfs_configuration). Singularity mksquashfs configuration
+
+{% if show_docker_warning != nil %}
+{% include 'notebox_begin_warning' %}
+This section is only relevant when using Singularity. Skip this section when using Docker.
+{% include 'notebox_end' %}
+{% endif %}
+
+Docker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 512M@. The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory.
diff --git a/doc/architecture/singularity.html.textile.liquid b/doc/architecture/singularity.html.textile.liquid
index a94af598b..9a82cd93d 100644
--- a/doc/architecture/singularity.html.textile.liquid
+++ b/doc/architecture/singularity.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. See the "Singularity page in the installation guide":{{ site.baseurl }}/install/singularity.html for configuration details.
+Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. For on premises Slurm/LSF clusters, see the "Set up a compute node with Singularity":{{ site.baseurl }}/install/crunch2/install-compute-node-singularity.html page. For cloud compute clusters, see the "Build a cloud compute node image":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html page.
 
 h2. Design overview
 
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index 5ea72f5e7..7c922e28d 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -16,6 +16,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 # "Introduction":#introduction
 # "Create an SSH keypair":#sshkeypair
 # "The build script":#building
+# "Singularity mksquashfs configuration":#singularity_mksquashfs_configuration
 # "Build an AWS image":#aws
 # "Build an Azure image":#azure
 
@@ -54,6 +55,12 @@ foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=
 </code></pre>
 </notextile>
 
+{% assign show_docker_warning = true %}
+
+{% include 'singularity_mksquashfs_configuration' %}
+
+The desired amount of memory to make available for @mksquashfs@ can be configured in an argument to the build script, see the next section. It defaults to @512M@.
+
 h2(#building). The build script
 
 The necessary files are located in the @arvados/tools/compute-images@ directory in the source tree. A build script is provided to generate the image. The @--help@ argument lists all available options:
@@ -97,15 +104,15 @@ Options:
   --azure-sku (default: unset, required if building for Azure, e.g. 16.04-LTS)
       Azure SKU image to use
   --ssh_user  (default: packer)
-      The user packer will use lo log into the image
-  --domain  (default: arvadosapi.com)
-      The domain part of the FQDN for the cluster
-  --resolver (default: 8.8.8.8)
+      The user packer will use to log into the image
+  --resolver (default: host's network provided)
       The dns resolver for the machine
   --reposuffix (default: unset)
       Set this to "-dev" to track the unstable/dev Arvados repositories
   --public-key-file (required)
       Path to the public key file that a-d-c will use to log into the compute node
+  --mksquashfs-mem (default: 512M)
+      Only relevant when using Singularity. This is the amount of memory mksquashfs is allowed to use.
   --debug
       Output debug information (default: false)
 </code></pre></notextile>
diff --git a/doc/install/crunch2-slurm/install-test.html.textile.liquid b/doc/install/crunch2-slurm/install-test.html.textile.liquid
index 786a71d3e..dc13c3c0f 100644
--- a/doc/install/crunch2-slurm/install-test.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-test.html.textile.liquid
@@ -26,7 +26,7 @@ If it works, this command should print @OK@ (it may also show some status messag
 
 h2. Test the dispatcher
 
-Make sure all of your compute nodes are set up with "Docker":../crunch2/install-compute-node-singularity.html or "Singularity":../crunch2/install-compute-node-docker.html.
+Make sure all of your compute nodes are set up with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.
 
 On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
 
diff --git a/doc/install/crunch2/install-compute-node-docker.html.textile.liquid b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
index 7e8f1dea7..876bb6ae5 100644
--- a/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
@@ -25,7 +25,7 @@ These instructions apply when Containers.RuntimeEngine is set to @docker@, refer
 
 h2(#introduction). Introduction
 
-This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados, with Slurm on a static cluster. These steps must be performed on every compute node.
+This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.
 
 h2(#docker). Set up Docker
 
diff --git a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
index 52b2612a5..09a3b4e3a 100644
--- a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
@@ -14,22 +14,43 @@ This page describes the requirements for a compute node in a Slurm or LSF cluste
 {% include 'notebox_end' %}
 
 {% include 'notebox_begin_warning' %}
-These instructions apply when Containers.RuntimeEngine is set to @singularity@, refer to "Set up a Slurm compute node with Docker":install-compute-node-docker.html when running @docker@.
+These instructions apply when Containers.RuntimeEngine is set to @singularity@, refer to "Set up a compute node with Docker":install-compute-node-docker.html when running @docker@.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
+# "Install python-arvados-fuse and crunch-run and squashfs-tools":#install-packages
 # "Set up Singularity":#singularity
-# "Update fuse.conf":#fuse
-# "Install'python-arvados-fuse and crunch-run":#install-packages
+# "Singularity mksquashfs configuration":#singularity_mksquashfs_configuration
 
 h2(#introduction). Introduction
 
-This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados, with Slurm on a static cluster. These steps must be performed on every compute node.
+Please refer to the "Singularity":{{site.baseurl}}/architecture/singularity.html documentation in the Architecture section.
+
+This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.
+
+{% assign arvados_component = 'python-arvados-fuse crunch-run squashfs-tools' %}
+
+{% include 'install_packages' %}
 
 h2(#singularity). Set up Singularity
 
-See "Singularity container runtime":../singularity.html
+Follow the "Singularity installation instructions":https://sylabs.io/guides/3.7/user-guide/quick_start.html. Make sure @singularity@ and @mksquashfs@ are working:
 
-{% assign arvados_component = 'python-arvados-fuse crunch-run' %}
+<notextile>
+<pre><code>$ <span class="userinput">singularity version</span>
+3.7.4
+$ <span class="userinput">mksquashfs -version</span>
+mksquashfs version 4.3-git (2014/06/09)
+[...]
+</code></pre>
+</notextile>
 
-{% include 'install_packages' %}
+Then update @Containers.RuntimeEngine@ in your cluster configuration:
+
+<notextile>
+<pre><code>      # Container runtime: "docker" (default) or "singularity"
+      RuntimeEngine: singularity
+</code></pre>
+</notextile>
+
+{% include 'singularity_mksquashfs_configuration' %}
diff --git a/doc/install/index.html.textile.liquid b/doc/install/index.html.textile.liquid
index 1b27ca6ed..2bd9710f7 100644
--- a/doc/install/index.html.textile.liquid
+++ b/doc/install/index.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin' %}
-This section is about installing an Arvados cluster.  If you are just looking to install Arvados client tools and libraries, "go to the SDK section.":{{site.baseurl}}/sdk
+This section is about installing an Arvados cluster.  If you are just looking to install Arvados client tools and libraries, "go to the SDK section.":{{site.baseurl}}/sdk/
 {% include 'notebox_end' %}
 
 Arvados components run on GNU/Linux systems, and support AWS, GCP and Azure cloud platforms as well as on-premises installs.  "Arvados is Free Software":{{site.baseurl}}/user/copying/copying.html and self-install installations are not limited in any way.  Commercial support and development are also available from "Curii Corporation.":mailto:info@curii.com
diff --git a/doc/install/singularity.html.textile.liquid b/doc/install/singularity.html.textile.liquid
deleted file mode 100644
index dfe12f314..000000000
--- a/doc/install/singularity.html.textile.liquid
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: default
-navsection: installguide
-title: Singularity container runtime
-...
-{% comment %}
-Copyright (C) The Arvados Authors. All rights reserved.
-
-SPDX-License-Identifier: CC-BY-SA-3.0
-{% endcomment %}
-
-h2(#overview). Overview
-
-Please refer to the "Singularity":{{site.baseurl}}/architecture/singularity.html documentation in the Architecture section.
-
-h2(#configuration). Configuration
-
-To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.7/user-guide/quick_start.html on your cloud worker image or Slurm/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
-
-<notextile>
-<pre><code>$ <span class="userinput">singularity version</span>
-3.7.4
-$ <span class="userinput">mksquashfs -version</span>
-mksquashfs version 4.3-git (2014/06/09)
-[...]
-</code></pre>
-</notextile>
-
-Then update @Containers.RuntimeEngine@ in your cluster configuration:
-
-<notextile>
-<pre><code>      # Container runtime: "docker" (default) or "singularity"
-      RuntimeEngine: singularity
-</code></pre>
-</notextile>
-
-Restart your dispatcher (@crunch-dispatch-slurm@, @arvados-dispatch-cloud@, or @arvados-dispatch-lsf@) after updating your configuration file.
-
-h2(#singularity_configuration). Singularity configuration
-
-Docker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 512M@. The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory.
diff --git a/doc/user/topics/arv-docker.html.textile.liquid b/doc/user/topics/arv-docker.html.textile.liquid
index 8a97df6e1..391b4e779 100644
--- a/doc/user/topics/arv-docker.html.textile.liquid
+++ b/doc/user/topics/arv-docker.html.textile.liquid
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 This page describes how to set up the runtime environment (e.g., the programs, libraries, and other dependencies needed to run a job) that a workflow step will be run in using "Docker":https://www.docker.com/ or "Singularity":https://sylabs.io/singularity/.  Docker and Singularity are tools for building and running containers that isolate applications from other applications running on the same node.  For detailed information, see the "Docker User Guide":https://docs.docker.com/userguide/ and the "Introduction to Singularity":https://sylabs.io/guides/3.5/user-guide/introduction.html.
 
-Note that Arvados always works with Docker images, even when it is configured to use Singularity to run containers. There are some differences between the two runtimes that can affect your containers. See the "Singularity container runtime":{{site.baseurl}}/install/singularity.html page for details.
+Note that Arvados always works with Docker images, even when it is configured to use Singularity to run containers. There are some differences between the two runtimes that can affect your containers. See the "Singularity architecture":{{site.baseurl}}/architecture/singularity.html page for details.
 
 This page describes:
 

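A quick way to confirm the resulting setting on a node built from such an image (a sketch; the config path is the one base.sh appends to in the first commit above, and the output assumes the 512M default):

$ grep 'mksquashfs mem' /var/lib/arvados/etc/singularity/singularity.conf
mksquashfs mem = 512M
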
commit 547fae4ba065b99e2a2832f441b745dc7cd59889
Author: Ward Vandewege <ward@curii.com>
Date:   Fri Oct 22 10:29:16 2021 -0400

    18289: Documentation changes to reflect we support both Slurm and LSF.
           Refactor the compute node installation instructions. Fix a few
           linkchecker warnings.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

diff --git a/doc/_config.yml b/doc/_config.yml
index 6f1c90d9b..8cc4c398e 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -246,16 +246,19 @@ navbar:
       - install/install-shell-server.html.textile.liquid
       - install/install-webshell.html.textile.liquid
       - install/install-arv-git-httpd.html.textile.liquid
-    - Containers API (cloud):
+    - Containers API (all):
       - install/install-jobs-image.html.textile.liquid
+    - Containers API (cloud):
       - install/crunch2-cloud/install-compute-node.html.textile.liquid
       - install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
-    - Containers API (slurm):
+    - Compute nodes (Slurm or LSF):
+      - install/crunch2/install-compute-node-docker.html.textile.liquid
+      - install/crunch2/install-compute-node-singularity.html.textile.liquid
+    - Containers API (Slurm):
       - install/crunch2-slurm/install-dispatch.html.textile.liquid
       - install/crunch2-slurm/configure-slurm.html.textile.liquid
-      - install/crunch2-slurm/install-compute-node.html.textile.liquid
       - install/crunch2-slurm/install-test.html.textile.liquid
-    - Containers API (lsf):
+    - Containers API (LSF):
       - install/crunch2-lsf/install-dispatch.html.textile.liquid
     - Additional configuration:
       - install/singularity.html.textile.liquid
diff --git a/doc/_includes/_container_scheduling_parameters.liquid b/doc/_includes/_container_scheduling_parameters.liquid
index abbe6f4c0..be046173a 100644
--- a/doc/_includes/_container_scheduling_parameters.liquid
+++ b/doc/_includes/_container_scheduling_parameters.liquid
@@ -6,7 +6,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2. Scheduling parameters
 
-Parameters to be passed to the container scheduler (e.g., SLURM) when running a container.
+Parameters to be passed to the container scheduler (e.g., Slurm) when running a container.
 
 table(table table-bordered table-condensed).
 |_. Key|_. Type|_. Description|_. Notes|
diff --git a/doc/_includes/_install_compute_fuse.liquid b/doc/_includes/_install_compute_fuse.liquid
index 40a7865ba..95679f3fa 100644
--- a/doc/_includes/_install_compute_fuse.liquid
+++ b/doc/_includes/_install_compute_fuse.liquid
@@ -6,10 +6,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#fuse). Update fuse.conf
 
-{% include 'notebox_begin_warning' %}
-This is only needed when Containers.RuntimeEngine is set to @docker@, skip this section when running @singularity@.
-{% include 'notebox_end' %}
-
 FUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers.  Install this file as @/etc/fuse.conf@:
 
 <notextile>
diff --git a/doc/_includes/_install_docker_cleaner.liquid b/doc/_includes/_install_docker_cleaner.liquid
index 1606a04e1..f8e9e049d 100644
--- a/doc/_includes/_install_docker_cleaner.liquid
+++ b/doc/_includes/_install_docker_cleaner.liquid
@@ -6,10 +6,6 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#docker-cleaner). Update docker-cleaner.json
 
-{% include 'notebox_begin_warning' %}
-This is only needed when Containers.RuntimeEngine is set to @docker@, skip this section when running @singularity@.
-{% include 'notebox_end' %}
-
 The @arvados-docker-cleaner@ program removes least recently used Docker images as needed to keep disk usage below a configured limit.
 
 Create a file @/etc/arvados/docker-cleaner/docker-cleaner.json@ in an editor, with the following contents.
diff --git a/doc/api/execution.html.textile.liquid b/doc/api/execution.html.textile.liquid
index 74942ba55..1d41acb01 100644
--- a/doc/api/execution.html.textile.liquid
+++ b/doc/api/execution.html.textile.liquid
@@ -15,7 +15,7 @@ h2. Container API
 
 # To submit work, create a "container request":{{site.baseurl}}/api/methods/container_requests.html in the @Committed@ state.
 # The system will fulfill the container request by creating or reusing a "Container object":{{site.baseurl}}/api/methods/containers.html and assigning it to the @container_uuid@ field.  If the same request has been submitted in the past, it may reuse an existing container.  The reuse behavior can be suppressed with @use_existing: false@ in the container request.
-# The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as SLURM).
+# The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as Slurm).
 # The container executes.  Upon termination the container goes into the  @Complete@ state.  If the container execution was interrupted or lost due to system failure, it will go into the @Cancelled@ state.
 # When the container associated with the container request is completed, the container request will go into the @Final@ state.
 # The @output_uuid@ field of the container request contains the uuid of output collection produced by container request.
diff --git a/doc/api/methods/container_requests.html.textile.liquid b/doc/api/methods/container_requests.html.textile.liquid
index 0aa96c3c3..870470110 100644
--- a/doc/api/methods/container_requests.html.textile.liquid
+++ b/doc/api/methods/container_requests.html.textile.liquid
@@ -72,7 +72,7 @@ Priority 1 is the lowest priority.
 
 Priority 1000 is the highest priority.
 
-The actual order that containers execute is determined by the underlying scheduling software (e.g. SLURM) and may be based on a combination of container priority, submission time, available resources, and other factors.
+The actual order that containers execute is determined by the underlying scheduling software (e.g. Slurm) and may be based on a combination of container priority, submission time, available resources, and other factors.
 
 In the current implementation, the magnitude of difference in priority between two containers affects the weight of priority vs age in determining scheduling order.  If two containers have only a small difference in priority (for example, 500 and 501) and the lower priority container has a longer queue time, the lower priority container may be scheduled before the higher priority container.  Use a greater magnitude difference (for example, 500 and 600) to give higher weight to priority over queue time.
 
diff --git a/doc/architecture/singularity.html.textile.liquid b/doc/architecture/singularity.html.textile.liquid
index dc10d28d3..a94af598b 100644
--- a/doc/architecture/singularity.html.textile.liquid
+++ b/doc/architecture/singularity.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a SLURM/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. See the "Singularity page in the installation guide":{{ site.baseurl }}/install/singularity.html for configuration details.
+Arvados can be configured to use "Singularity":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. See the "Singularity page in the installation guide":{{ site.baseurl }}/install/singularity.html for configuration details.
 
 h2. Design overview
 
@@ -30,6 +30,6 @@ h2. Limitations
 Arvados @Singularity@ support is a work in progress. These are the current limitations of the implementation:
 
 * Even when using the Singularity runtime, users' container images are expected to be saved in Docker format. Specifying a @.sif@ file as an image when submitting a container request is not yet supported.
-* Arvados' Singularity implementation does not yet limit the amount of memory available in a container. Each container will have access to all memory on the host where it runs, unless memory use is restricted by SLURM/LSF.
+* Arvados' Singularity implementation does not yet limit the amount of memory available in a container. Each container will have access to all memory on the host where it runs, unless memory use is restricted by Slurm/LSF.
 * The Docker ENTRYPOINT instruction is ignored.
 * Arvados is tested with Singularity version 3.7.4. Other versions may not work.
diff --git a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
index 0638ce70e..5ea72f5e7 100644
--- a/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-compute-node.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-arvados-dispatch-cloud is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm.
+@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
diff --git a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
index 347804662..b4987f443 100644
--- a/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
+++ b/doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-arvados-dispatch-cloud is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm.
+@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
diff --git a/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid b/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
index c38e6f205..7e44c8ec4 100644
--- a/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-lsf/install-dispatch.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-arvados-dispatch-lsf is only relevant for on premises clusters that will spool jobs to LSF. Skip this section if you are installing a cloud cluster.
+@arvados-dispatch-lsf@ is only relevant for on premises clusters that will spool jobs to LSF. Skip this section if you use Slurm or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
 h2(#overview). Overview
@@ -19,7 +19,7 @@ Containers can be dispatched to an LSF cluster.  The dispatcher sends work to th
 
 In order to run containers, you must choose a user that has permission to set up FUSE mounts and run Singularity/Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.
 
-Set up all of your compute nodes "as you would for a SLURM cluster":../crunch2-slurm/install-compute-node.html.
+Set up all of your compute nodes with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.
 
 *Current limitations*:
 * Arvados container priority is not propagated to LSF job priority. This can cause inefficient use of compute resources, and even deadlock if there are fewer compute nodes than concurrent Arvados workflows.
diff --git a/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid b/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid
index eda330798..b6f1ac808 100644
--- a/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid
+++ b/doc/install/crunch2-slurm/configure-slurm.html.textile.liquid
@@ -10,10 +10,10 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
-Containers can be dispatched to a Slurm cluster.  The dispatcher sends work to the cluster using Slurm's @sbatch@ command, so it works in a variety of SLURM configurations.
+Containers can be dispatched to a Slurm cluster.  The dispatcher sends work to the cluster using Slurm's @sbatch@ command, so it works in a variety of Slurm configurations.
 
 In order to run containers, you must run the dispatcher as a user that has permission to set up FUSE mounts and run Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.
 
@@ -76,7 +76,7 @@ h3. Slurm configuration essentials
 
 Whenever you change this file, you will need to update the copy _on every compute node_ as well as the controller node, and then run @sudo scontrol reconfigure@.
 
-*@ControlMachine@* should be a DNS name that resolves to the Slurm controller (dispatch/API server). This must resolve correctly on all Slurm worker nodes as well as the controller itself. In general SLURM is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.
+*@ControlMachine@* should be a DNS name that resolves to the Slurm controller (dispatch/API server). This must resolve correctly on all Slurm worker nodes as well as the controller itself. In general Slurm is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.
 
 *@SelectType=select/linear@* is needed on cloud-based installations that update node sizes dynamically, but it can only schedule one container at a time on each node. On a static or homogeneous cluster, use @SelectType=select/cons_res@ with @SelectTypeParameters=CR_CPU_Memory@ instead to enable node sharing.
 
@@ -103,7 +103,7 @@ If you want Arvados to assign names to your nodes with a different consecutive n
 * In @application.yml@: <code>assign_node_hostname: worker1-%<slot_number>04d</code>
 * In @slurm.conf@: <code>NodeName=worker1-[0000-0255]</code>
 
-If your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script (see "Installing a compute node":install-compute-node.html) send its current hostname, rather than expect Arvados to assign one.
+If your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script send its current hostname, rather than expect Arvados to assign one.
 * In @application.yml@: <code>assign_node_hostname: false</code>
 * In @slurm.conf@: <code>NodeName=alice,bob,clay,darlene</code>
 
diff --git a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
index 5b5b868e5..52553a35e 100644
--- a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
@@ -11,7 +11,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
@@ -22,7 +22,7 @@ crunch-dispatch-slurm is only relevant for on premises clusters that will spool
 
 h2(#introduction). Introduction
 
-This assumes you already have a Slurm cluster, and have "set up all of your compute nodes":install-compute-node.html.  Slurm packages are available for CentOS, Debian and Ubuntu. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
+This assumes you already have a Slurm cluster, and have set up all of your compute nodes with "Docker":../crunch2/install-compute-node-docker.html or "Singularity":../crunch2/install-compute-node-singularity.html.  Slurm packages are available for CentOS, Debian and Ubuntu. Please see your distribution package repositories. For information on installing Slurm from source, see "this install guide":https://slurm.schedmd.com/quickstart_admin.html
 
 The Arvados Slurm dispatcher can run on any node that can submit requests to both the Arvados API server and the Slurm controller (via @sbatch@).  It is not resource-intensive, so you can run it on the API server node.
 
@@ -79,7 +79,7 @@ Some Arvados installations run a local keepstore on each compute node to handle
 h3(#PrioritySpread). Containers.Slurm.PrioritySpread
 
 crunch-dispatch-slurm adjusts the "nice" values of its Slurm jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.
-* If non-Arvados jobs run on your Slurm cluster, and your Arvados containers are waiting too long in the Slurm queue because their "nice" values are too high for them to compete with other SLURM jobs, you should use a smaller PrioritySpread value.
+* If non-Arvados jobs run on your Slurm cluster, and your Arvados containers are waiting too long in the Slurm queue because their "nice" values are too high for them to compete with other Slurm jobs, you should use a smaller PrioritySpread value.
 * If you have an older Slurm system that limits nice values to 10000, a smaller @PrioritySpread@ can help avoid reaching that limit.
 * In other cases, a larger value is beneficial because it reduces the total number of adjustments made by executing @scontrol@.
 
@@ -122,7 +122,7 @@ The choice of subsystem ("memory" in this example) must correspond to one of the
 
 Some versions of Docker (at least 1.9), when run under systemd, require the cgroup parent to be specified as a systemd slice.  This causes an error when specifying a cgroup parent created outside systemd, such as those created by Slurm.
 
-You can work around this issue by disabling the Docker daemon's systemd integration.  This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use Slurm's cgroups as container parents.  To do this, "configure the Docker daemon on all compute nodes":install-compute-node.html#configure_docker_daemon to run with the option @--exec-opt native.cgroupdriver=cgroupfs at .
+You can work around this issue by disabling the Docker daemon's systemd integration.  This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use Slurm's cgroups as container parents.  To do this, configure the Docker daemon on all compute nodes to run with the option @--exec-opt native.cgroupdriver=cgroupfs at .
 
 {% include 'notebox_end' %}
 
diff --git a/doc/install/crunch2-slurm/install-test.html.textile.liquid b/doc/install/crunch2-slurm/install-test.html.textile.liquid
index 647995a8c..786a71d3e 100644
--- a/doc/install/crunch2-slurm/install-test.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-test.html.textile.liquid
@@ -10,7 +10,7 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.
 {% include 'notebox_end' %}
 
 h2. Test compute node setup
@@ -26,6 +26,8 @@ If it works, this command should print @OK@ (it may also show some status messag
 
 h2. Test the dispatcher
 
+Make sure all of your compute nodes are set up with "Docker":../crunch2/install-compute-node-singularity.html or "Singularity":../crunch2/install-compute-node-docker.html.
+
 On the dispatch node, start monitoring the crunch-dispatch-slurm logs:
 
 <notextile>
diff --git a/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
similarity index 63%
rename from doc/install/crunch2-slurm/install-compute-node.html.textile.liquid
rename to doc/install/crunch2/install-compute-node-docker.html.textile.liquid
index 8c01c44ed..7e8f1dea7 100644
--- a/doc/install/crunch2-slurm/install-compute-node.html.textile.liquid
+++ b/doc/install/crunch2/install-compute-node-docker.html.textile.liquid
@@ -1,7 +1,7 @@
 ---
 layout: default
 navsection: installguide
-title: Set up a Slurm compute node
+title: Set up a compute node with Docker
 ...
 {% comment %}
 Copyright (C) The Arvados Authors. All rights reserved.
@@ -10,16 +10,17 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
 {% include 'notebox_begin_warning' %}
-crunch-dispatch-slurm is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you are installing a cloud cluster.
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+{% include 'notebox_end' %}
+
+{% include 'notebox_begin_warning' %}
+These instructions apply when Containers.RuntimeEngine is set to @docker@, refer to "Set up a compute node with Singularity":install-compute-node-singularity.html when running @singularity@.
 {% include 'notebox_end' %}
 
 # "Introduction":#introduction
 # "Set up Docker":#docker
 # "Update fuse.conf":#fuse
 # "Update docker-cleaner.json":#docker-cleaner
-# "Configure Linux cgroups accounting":#cgroups
-# "Install Docker":#install_docker
-# "Configure the Docker daemon":#configure_docker_daemon
 # "Install'python-arvados-fuse and crunch-run and arvados-docker-cleaner":#install-packages
 
 h2(#introduction). Introduction
diff --git a/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
new file mode 100644
index 000000000..52b2612a5
--- /dev/null
+++ b/doc/install/crunch2/install-compute-node-singularity.html.textile.liquid
@@ -0,0 +1,35 @@
+---
+layout: default
+navsection: installguide
+title: Set up a compute node with Singularity
+...
+{% comment %}
+Copyright (C) The Arvados Authors. All rights reserved.
+
+SPDX-License-Identifier: CC-BY-SA-3.0
+{% endcomment %}
+
+{% include 'notebox_begin_warning' %}
+This page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to "Build a cloud compute node image":/install/crunch2-cloud/install-compute-node.html.
+{% include 'notebox_end' %}
+
+{% include 'notebox_begin_warning' %}
+These instructions apply when Containers.RuntimeEngine is set to @singularity@, refer to "Set up a Slurm compute node with Docker":install-compute-node-docker.html when running @docker@.
+{% include 'notebox_end' %}
+
+# "Introduction":#introduction
+# "Set up Singularity":#singularity
+# "Update fuse.conf":#fuse
+# "Install'python-arvados-fuse and crunch-run":#install-packages
+
+h2(#introduction). Introduction
+
+This page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados, with Slurm on a static cluster. These steps must be performed on every compute node.
+
+h2(#singularity). Set up Singularity
+
+See "Singularity container runtime":../singularity.html
+
+{% assign arvados_component = 'python-arvados-fuse crunch-run' %}
+
+{% include 'install_packages' %}
diff --git a/doc/install/singularity.html.textile.liquid b/doc/install/singularity.html.textile.liquid
index b2a39790d..dfe12f314 100644
--- a/doc/install/singularity.html.textile.liquid
+++ b/doc/install/singularity.html.textile.liquid
@@ -15,7 +15,7 @@ Please refer to the "Singularity":{{site.baseurl}}/architecture/singularity.html
 
 h2(#configuration). Configuration
 
-To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.7/user-guide/quick_start.html on your cloud worker image or SLURM/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
+To use singularity, first make sure "Singularity is installed":https://sylabs.io/guides/3.7/user-guide/quick_start.html on your cloud worker image or Slurm/LSF compute nodes as applicable. Note @squashfs-tools@ is required.
 
 <notextile>
 <pre><code>$ <span class="userinput">singularity version</span>
diff --git a/doc/sdk/index.html.textile.liquid b/doc/sdk/index.html.textile.liquid
index b6ed39ed2..844f1ff71 100644
--- a/doc/sdk/index.html.textile.liquid
+++ b/doc/sdk/index.html.textile.liquid
@@ -9,7 +9,7 @@ Copyright (C) The Arvados Authors. All rights reserved.
 SPDX-License-Identifier: CC-BY-SA-3.0
 {% endcomment %}
 
-This section documents language bindings for the "Arvados API":{{site.baseurl}}/api and Keep that are available for various programming languages.  Not all features are available in every SDK.  The most complete SDK is the Python SDK.  Note that this section only gives a high level overview of each SDK.  Consult the "Arvados API":{{site.baseurl}}/api section for detailed documentation about Arvados API calls available on each resource.
+This section documents language bindings for the "Arvados API":{{site.baseurl}}/api/index.html and Keep that are available for various programming languages.  Not all features are available in every SDK.  The most complete SDK is the Python SDK.  Note that this section only gives a high level overview of each SDK.  Consult the "Arvados API":{{site.baseurl}}/api/index.html section for detailed documentation about Arvados API calls available on each resource.
 
 * "Python SDK":{{site.baseurl}}/sdk/python/sdk-python.html (also includes essential command line tools such as "arv-put" and "arv-get")
 * "Command line SDK":{{site.baseurl}}/sdk/cli/install.html ("arv")

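As a worked example of the runtime switch these refactored pages document (a sketch; the systemd unit name is an assumption matching the package name): after setting Containers.RuntimeEngine to "singularity" in the cluster configuration, restart the dispatcher so the change takes effect.

$ sudo systemctl restart crunch-dispatch-slurm    # or arvados-dispatch-lsf / arvados-dispatch-cloud
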
commit a66c119e7198d95969fbcbcde48a05c92dbc07ac
Author: Ward Vandewege <ward@curii.com>
Date:   Thu Oct 21 19:33:48 2021 -0400

    18289: documentation: singularity refinements.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

diff --git a/doc/_includes/_install_compute_fuse.liquid b/doc/_includes/_install_compute_fuse.liquid
index 95679f3fa..40a7865ba 100644
--- a/doc/_includes/_install_compute_fuse.liquid
+++ b/doc/_includes/_install_compute_fuse.liquid
@@ -6,6 +6,10 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#fuse). Update fuse.conf
 
+{% include 'notebox_begin_warning' %}
+This is only needed when Containers.RuntimeEngine is set to @docker@, skip this section when running @singularity@.
+{% include 'notebox_end' %}
+
 FUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers.  Install this file as @/etc/fuse.conf@:
 
 <notextile>
diff --git a/doc/_includes/_install_docker_cleaner.liquid b/doc/_includes/_install_docker_cleaner.liquid
index f8e9e049d..1606a04e1 100644
--- a/doc/_includes/_install_docker_cleaner.liquid
+++ b/doc/_includes/_install_docker_cleaner.liquid
@@ -6,6 +6,10 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 h2(#docker-cleaner). Update docker-cleaner.json
 
+{% include 'notebox_begin_warning' %}
+This is only needed when Containers.RuntimeEngine is set to @docker@, skip this section when running @singularity@.
+{% include 'notebox_end' %}
+
 The @arvados-docker-cleaner@ program removes least recently used Docker images as needed to keep disk usage below a configured limit.
 
 Create a file @/etc/arvados/docker-cleaner/docker-cleaner.json@ in an editor, with the following contents.

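The fuse.conf requirement that these notes now scope to the Docker runtime boils down to a single option; a shell sketch for ensuring it is present (idempotent, assuming root):

# Ensure user_allow_other is enabled so Crunch can set up Keep mounts
# that are readable by containers (needed for the Docker runtime only).
grep -qx user_allow_other /etc/fuse.conf || echo user_allow_other >> /etc/fuse.conf
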
commit 644d62c8bb6528713db56a8464ffaba94740e0e5
Author: Ward Vandewege <ward@curii.com>
Date:   Thu Oct 21 16:53:03 2021 -0400

    18289: only pass the --allow-other argument to arv-mount when the
           runtime is Docker.
    
    Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward@curii.com>

diff --git a/lib/crunchrun/crunchrun.go b/lib/crunchrun/crunchrun.go
index 42f143f1c..3036d5555 100644
--- a/lib/crunchrun/crunchrun.go
+++ b/lib/crunchrun/crunchrun.go
@@ -402,11 +402,14 @@ func (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {
 	arvMountCmd := []string{
 		"arv-mount",
 		"--foreground",
-		"--allow-other",
 		"--read-write",
 		"--storage-classes", strings.Join(runner.Container.OutputStorageClasses, ","),
 		fmt.Sprintf("--crunchstat-interval=%v", runner.statInterval.Seconds())}
 
+	if runner.executor.Runtime() == "docker" {
+		arvMountCmd = append(arvMountCmd, "--allow-other")
+	}
+
 	if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {
 		arvMountCmd = append(arvMountCmd, "--file-cache", fmt.Sprintf("%d", runner.Container.RuntimeConstraints.KeepCacheRAM))
 	}
diff --git a/lib/crunchrun/crunchrun_test.go b/lib/crunchrun/crunchrun_test.go
index 1131982de..4c5f517b1 100644
--- a/lib/crunchrun/crunchrun_test.go
+++ b/lib/crunchrun/crunchrun_test.go
@@ -1124,7 +1124,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 		cr.statInterval = 5 * time.Second
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}})
@@ -1144,7 +1144,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "foo,bar", "--crunchstat-interval=5",
 			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/out": {realTemp + "/tmp2", false}, "/tmp": {realTemp + "/tmp3", false}})
@@ -1164,7 +1164,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/tmp": {realTemp + "/tmp2", false}, "/etc/arvados/ca-certificates.crt": {stubCertPath, true}})
@@ -1187,7 +1187,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{"/keeptmp": {realTemp + "/keep1/tmp0", false}})
@@ -1210,7 +1210,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{
@@ -1237,7 +1237,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{
@@ -1320,7 +1320,7 @@ func (s *TestSuite) TestSetupMounts(c *C) {
 
 		bindmounts, err := cr.SetupMounts()
 		c.Check(err, IsNil)
-		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground", "--allow-other",
+		c.Check(am.Cmd, DeepEquals, []string{"arv-mount", "--foreground",
 			"--read-write", "--storage-classes", "default", "--crunchstat-interval=5",
 			"--file-cache", "512", "--mount-tmp", "tmp0", "--mount-by-pdh", "by_id", "--mount-by-id", "by_uuid", realTemp + "/keep1"})
 		c.Check(bindmounts, DeepEquals, map[string]bindmount{

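The net effect on the generated command line can be sketched as follows (abridged; the trailing mount arguments are elided):

# With Containers.RuntimeEngine = "docker" (sketch, abridged):
arv-mount --foreground --read-write --storage-classes default \
    --crunchstat-interval=5 --allow-other ...
# With "singularity" the command is identical, but --allow-other is omitted.
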
-----------------------------------------------------------------------


hooks/post-receive