[ARVADOS] created: 1.3.0-1433-g0ea1d6435
Git user
git@public.curoverse.com
Fri Aug 2 15:04:10 UTC 2019
at 0ea1d6435771e00c7b84b50dc0cff1a13a1d7b67 (commit)
commit 0ea1d6435771e00c7b84b50dc0cff1a13a1d7b67
Author: Peter Amstutz <pamstutz@veritasgenetics.com>
Date: Fri Aug 2 11:03:50 2019 -0400
15467: Formatting fixes
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>
diff --git a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
index 28817dd29..8ed6dc105 100644
--- a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
@@ -55,10 +55,10 @@ Clusters:
zzzzz:
Containers:
SLURM:
- KeepServices:
+ <span class="userinput">KeepServices:
00000-bi6l4-000000000000000:
InternalURLs:
- "http://127.0.0.1:25107": {}
+ "http://127.0.0.1:25107": {}</span>
</code></pre>
</notextile>
@@ -75,7 +75,7 @@ Clusters:
</code></pre>
</notextile>
-h3(#PrioritySpread). PrioritySpread
+h3(#PrioritySpread). Containers.SLURM.PrioritySpread
crunch-dispatch-slurm adjusts the "nice" values of its SLURM jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.
* If non-Arvados jobs run on your SLURM cluster, and your Arvados containers are waiting too long in the SLURM queue because their "nice" values are too high for them to compete with other SLURM jobs, you should use a smaller PrioritySpread value.
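As an illustration (assuming niceness is assigned in steps of PrioritySpread): with the default of 10, the highest-priority container is submitted at nice 0, the next at nice 10, and so on; a larger spread such as 1000 widens those gaps, making it easier for competing jobs to slot in between Arvados containers, while a smaller spread keeps the Arvados queue packed near the top of SLURM's priority range.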
@@ -103,8 +103,8 @@ Clusters:
zzzzz:
Containers:
SLURM:
- SbatchArgumentsList:
- - <b>"--partition=PartitionName"</b>
+ <code class="userinput">SbatchArgumentsList:
+ - <b>"--partition=PartitionName"</b></code>
</pre>
</notextile>
@@ -119,8 +119,8 @@ If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunc
Clusters:
zzzzz:
Containers:
- <code class="userinput">CrunchRunArgumentsList:
- - <b>"-cgroup-parent-subsystem=memory"</b></code>
+ <code class="userinput">CrunchRunArgumentsList:
+ - <b>"-cgroup-parent-subsystem=memory"</b></code>
</pre>
</notextile>
@@ -143,9 +143,9 @@ Older Linux kernels (prior to 3.18) have bugs in network namespace handling whic
Clusters:
zzzzz:
Containers:
- <code class="userinput">CrunchRunArgumentsList:
- - <b>"-container-enable-networking=always"</b>
- - <b>"-container-network-mode=host"</b></code>
+ <code class="userinput">CrunchRunArgumentsList:
+ - <b>"-container-enable-networking=always"</b>
+ - <b>"-container-network-mode=host"</b></code>
</pre>
</notextile>
diff --git a/doc/install/install-ws.html.textile.liquid b/doc/install/install-ws.html.textile.liquid
index 1c3b357df..f6a4bb5fa 100644
--- a/doc/install/install-ws.html.textile.liquid
+++ b/doc/install/install-ws.html.textile.liquid
@@ -51,15 +51,13 @@ Usage of arvados-ws:
h3. Update cluster config
-Edit the cluster config at @/etc/arvados/config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@. Replace @zzzzz@ with your cluster id. Expects that @SystemRootToken@, @Services.API@ and @PostgreSQL@ sections are already filled out.
+Edit the cluster config at @/etc/arvados/config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@. Replace @zzzzz@ with your cluster id.
<notextile>
-<pre><code>
-Clusters:
+<pre><code>Clusters:
zzzzz:
Services:
- <span class="userinput">
- Websocket:
+ <span class="userinput">Websocket:
ExternalURL: wss://ws.uuid_prefix.your.domain/websocket
InternalURLs:
"http://localhost:9003": {}
commit e7036ac7fdf2d7a1e4083d9151f14bab8db9efd0
Author: Peter Amstutz <pamstutz@veritasgenetics.com>
Date: Fri Aug 2 10:46:11 2019 -0400
15467: Added tests for KeepServices
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>
diff --git a/lib/config/deprecated.go b/lib/config/deprecated.go
index 3777e4770..2f2455666 100644
--- a/lib/config/deprecated.go
+++ b/lib/config/deprecated.go
@@ -200,10 +200,12 @@ func loadOldClientConfig(cluster *arvados.Cluster, client *arvados.Client) {
cluster.Containers.SLURM.KeepServices = make(map[string]arvados.Service)
}
if cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"].InternalURLs == nil {
- cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"].InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
+ cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"] = arvados.Service{InternalURLs: make(map[arvados.URL]arvados.ServiceInstance)}
}
p, err := url.Parse(r)
- cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"].InternalURLs[arvados.URL(p)] = struct{}{}
+ if err == nil {
+ cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"].InternalURLs[arvados.URL(*p)] = struct{}{}
+ }
}
}
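The wholesale reassignment above is forced by Go's map semantics: a struct stored as a map value is not addressable, so its fields cannot be assigned in place. A standalone sketch of the pattern (simplified types, not the real arvados ones):

package main

import "fmt"

type Service struct {
	InternalURLs map[string]struct{}
}

func main() {
	services := map[string]Service{}

	// services["x"].InternalURLs = make(map[string]struct{})
	// ^ compile error: cannot assign to struct field in map

	// Replace the whole entry instead, as the diff does:
	services["x"] = Service{InternalURLs: make(map[string]struct{})}

	// Indexing into the value's map afterwards is fine:
	services["x"].InternalURLs["http://127.0.0.1:25107"] = struct{}{}
	fmt.Println(services)
}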
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index 35edb05bc..4e7790603 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -531,6 +531,9 @@ Clusters:
SLURM:
PrioritySpread: 0
SbatchArgumentsList: []
+ KeepServices:
+ SAMPLE:
+ InternalURLs: {}
Managed:
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
index 300e92364..982141ad8 100644
--- a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
+++ b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
@@ -13,6 +13,7 @@ import (
"fmt"
"log"
"math"
+ "net/url"
"os"
"regexp"
"strings"
@@ -133,15 +134,16 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
ks := ""
- if length(disp.cluster.Containers.SLURM.KeepServices) > 0 {
+ if len(disp.cluster.Containers.SLURM.KeepServices) > 0 {
for _, svc := range disp.cluster.Containers.SLURM.KeepServices {
for k, _ := range svc.InternalURLs {
- ks += k
+ u := url.URL(k)
+ ks += u.String()
ks += " "
}
}
}
- os.Setenv("ARVADOS_KEEP_SERVICES", ks)
+ os.Setenv("ARVADOS_KEEP_SERVICES", strings.TrimSuffix(ks, " "))
os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
} else {
disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
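Because Go maps have no defined iteration order, the URLs land in the environment variable in arbitrary order, and the TrimSuffix above removes the trailing separator. Collecting into a slice and joining avoids the trailing space entirely; a sketch of that alternative (hypothetical helper, with the same assumption the diff relies on, namely that arvados.URL converts to net/url.URL):

import (
	"net/url"
	"strings"

	"git.curoverse.com/arvados.git/sdk/go/arvados"
)

// keepServicesEnv builds the ARVADOS_KEEP_SERVICES value with no
// trailing separator. Hypothetical helper, not part of this change.
func keepServicesEnv(svcs map[string]arvados.Service) string {
	var urls []string
	for _, svc := range svcs {
		for k := range svc.InternalURLs {
			u := url.URL(k)
			urls = append(urls, u.String())
		}
	}
	return strings.Join(urls, " ")
}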
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
index 4a0213d47..63117128e 100644
--- a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
+++ b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
@@ -11,7 +11,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"net/http"
"net/http/httptest"
"os"
@@ -396,8 +395,9 @@ func (s *StubbedSuite) TestLoadLegacyConfig(c *C) {
Client:
APIHost: example.com
AuthToken: abcdefg
- KeepServices:
- - https://example.com/keep
+ KeepServiceURIs:
+ - https://example.com/keep1
+ - https://example.com/keep2
SbatchArguments: ["--foo", "bar"]
PollPeriod: 12s
PrioritySpread: 42
@@ -408,16 +408,16 @@ BatchSize: 99
`)
tmpfile, err := ioutil.TempFile("", "example")
if err != nil {
- log.Fatal(err)
+ c.Error(err)
}
defer os.Remove(tmpfile.Name()) // clean up
if _, err := tmpfile.Write(content); err != nil {
- log.Fatal(err)
+ c.Error(err)
}
if err := tmpfile.Close(); err != nil {
- log.Fatal(err)
+ c.Error(err)
}
err = s.disp.configure("crunch-dispatch-slurm", []string{"-config", tmpfile.Name()})
@@ -433,11 +433,16 @@ BatchSize: 99
c.Check(s.disp.cluster.Containers.ReserveExtraRAM, Equals, arvados.ByteSize(12345))
c.Check(s.disp.cluster.Containers.MinRetryPeriod, Equals, arvados.Duration(13*time.Second))
c.Check(s.disp.cluster.API.MaxItemsPerResponse, Equals, 99)
- c.Check(s.disp.cluster.Containers.SLURM.KeepServices, DeepEquals, map[string]Service{
- "00000-bi6l4-000000000000000": Service{
- InternalURLs: map[string]struct{}{
- "https://example.com/keep": struct{}{},
+ c.Check(s.disp.cluster.Containers.SLURM.KeepServices, DeepEquals, map[string]arvados.Service{
+ "00000-bi6l4-000000000000000": arvados.Service{
+ InternalURLs: map[arvados.URL]arvados.ServiceInstance{
+ arvados.URL{Scheme: "https", Path: "/keep1", Host: "example.com"}: struct{}{},
+ arvados.URL{Scheme: "https", Path: "/keep2", Host: "example.com"}: struct{}{},
},
},
})
+ ks := os.Getenv("ARVADOS_KEEP_SERVICES")
+ if ks != "https://example.com/keep1 https://example.com/keep2" && ks != "https://example.com/keep2 https://example.com/keep1" {
+ c.Assert(ks, Equals, "https://example.com/keep1 https://example.com/keep2")
+ }
}
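The final check accepts either ordering because Go randomizes map iteration, so ARVADOS_KEEP_SERVICES may list the two URLs in either order. An order-insensitive formulation of the same assertion (a sketch; needs "os", "sort", and "strings" alongside the test's existing imports):

// Normalize the space-separated value before comparing, so the
// assertion no longer depends on map iteration order.
got := strings.Fields(os.Getenv("ARVADOS_KEEP_SERVICES"))
sort.Strings(got)
c.Check(got, DeepEquals, []string{
	"https://example.com/keep1",
	"https://example.com/keep2",
})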
commit bc9db394601ad6f442e0ae84f0b66da6cf5e5fb6
Author: Peter Amstutz <pamstutz@veritasgenetics.com>
Date: Thu Aug 1 09:49:58 2019 -0400
15467: Adding KeepServices WIP
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 2b1da2f2a..051d47887 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -525,6 +525,9 @@ Clusters:
SLURM:
PrioritySpread: 0
SbatchArgumentsList: []
+ KeepServices:
+ SAMPLE:
+ InternalURLs: {}
Managed:
# Path to dns server configuration directory
# (e.g. /etc/unbound.d/conf.d). If false, do not write any config
diff --git a/lib/config/deprecated.go b/lib/config/deprecated.go
index 3e1ec7278..3777e4770 100644
--- a/lib/config/deprecated.go
+++ b/lib/config/deprecated.go
@@ -7,6 +7,7 @@ package config
import (
"fmt"
"io/ioutil"
+ "net/url"
"os"
"strings"
@@ -194,6 +195,16 @@ func loadOldClientConfig(cluster *arvados.Cluster, client *arvados.Client) {
cluster.SystemRootToken = client.AuthToken
}
cluster.TLS.Insecure = client.Insecure
+ for _, r := range client.KeepServiceURIs {
+ if cluster.Containers.SLURM.KeepServices == nil {
+ cluster.Containers.SLURM.KeepServices = make(map[string]arvados.Service)
+ }
+ if cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"].InternalURLs == nil {
+ cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"].InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
+ }
+ p, err := url.Parse(r)
+ cluster.Containers.SLURM.KeepServices["00000-bi6l4-000000000000000"].InternalURLs[arvados.URL(p)] = struct{}{}
+ }
}
// update config using values from an crunch-dispatch-slurm config file.
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index bee93046e..f63996437 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -294,6 +294,7 @@ type ContainersConfig struct {
SLURM struct {
PrioritySpread int64
SbatchArgumentsList []string
+ KeepServices map[string]Service
Managed struct {
DNSServerConfDir string
DNSServerConfTemplate string
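For context, the shapes the new KeepServices field relies on can be inferred from the conversions used elsewhere in this series; a sketch of the assumed definitions (not the verbatim sdk/go/arvados source):

import "net/url"

// URL is evidently a defined type over net/url.URL — hence the
// url.URL(k) and arvados.URL(*p) conversions in the diffs above.
type URL url.URL

// ServiceInstance carries no fields yet — hence the struct{}{} values.
type ServiceInstance struct{}

type Service struct {
	InternalURLs map[URL]ServiceInstance
	// ... other fields omitted in this sketch
}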
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
index 9f69c4446..300e92364 100644
--- a/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
+++ b/services/crunch-dispatch-slurm/crunch-dispatch-slurm.go
@@ -132,7 +132,16 @@ func (disp *Dispatcher) configure(prog string, args []string) error {
if disp.Client.Insecure {
os.Setenv("ARVADOS_API_HOST_INSECURE", "1")
}
- os.Setenv("ARVADOS_KEEP_SERVICES", strings.Join(disp.Client.KeepServiceURIs, " "))
+ ks := ""
+ if length(disp.cluster.Containers.SLURM.KeepServices) > 0 {
+ for _, svc := range disp.cluster.Containers.SLURM.KeepServices {
+ for k, _ := range svc.InternalURLs {
+ ks += k
+ ks += " "
+ }
+ }
+ }
+ os.Setenv("ARVADOS_KEEP_SERVICES", ks)
os.Setenv("ARVADOS_EXTERNAL_CLIENT", "")
} else {
disp.logger.Warnf("Client credentials missing from config, so falling back on environment variables (deprecated).")
diff --git a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
index 6007c6d4a..4a0213d47 100644
--- a/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
+++ b/services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go
@@ -396,6 +396,8 @@ func (s *StubbedSuite) TestLoadLegacyConfig(c *C) {
Client:
APIHost: example.com
AuthToken: abcdefg
+ KeepServices:
+ - https://example.com/keep
SbatchArguments: ["--foo", "bar"]
PollPeriod: 12s
PrioritySpread: 42
@@ -431,4 +433,11 @@ BatchSize: 99
c.Check(s.disp.cluster.Containers.ReserveExtraRAM, Equals, arvados.ByteSize(12345))
c.Check(s.disp.cluster.Containers.MinRetryPeriod, Equals, arvados.Duration(13*time.Second))
c.Check(s.disp.cluster.API.MaxItemsPerResponse, Equals, 99)
+ c.Check(s.disp.cluster.Containers.SLURM.KeepServices, DeepEquals, map[string]Service{
+ "00000-bi6l4-000000000000000": Service{
+ InternalURLs: map[string]struct{}{
+ "https://example.com/keep": struct{}{},
+ },
+ },
+ })
}
commit 05286e9ca2e765bcf6f54225c9d3ccbbfccf1c1e
Author: Peter Amstutz <pamstutz@veritasgenetics.com>
Date: Wed Jul 31 15:05:23 2019 -0400
15467: Update install docs for crunch-dispatch-slurm
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>
diff --git a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
index cd338296b..28817dd29 100644
--- a/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
+++ b/doc/install/crunch2-slurm/install-dispatch.html.textile.liquid
@@ -31,23 +31,10 @@ On Debian-based systems:
</code></pre>
</notextile>
-h2. Create a dispatcher token
-
-Create an Arvados superuser token for use by the dispatcher. If you have multiple dispatch processes, you should give each one a different token.
-
-{% include 'create_superuser_token' %}
h2. Configure the dispatcher
-Set up crunch-dispatch-slurm's configuration directory:
-
-<notextile>
-<pre><code>~$ <span class="userinput">sudo mkdir -p /etc/arvados</span>
-~$ <span class="userinput">sudo install -d -o root -g <b>crunch</b> -m 0750 /etc/arvados/crunch-dispatch-slurm</span>
-</code></pre>
-</notextile>
-
-Edit @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ to authenticate to your Arvados API server, using the token you generated in the previous step. Follow this YAML format:
+Edit @/etc/arvados/config.yml@.
<notextile>
<pre><code class="userinput">Client:
@@ -58,25 +45,33 @@ Edit @/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml@ to authentic
This is the only configuration required by crunch-dispatch-slurm. The subsections below describe optional configuration flags you can set inside the main configuration object.
-h3(#KeepServiceURIs). Client::KeepServiceURIs
+h3(#KeepServiceURIs). Containers.SLURM.KeepServices
Override Keep service discovery with a predefined list of Keep URIs. This can be useful if the compute nodes run a local keepstore that should handle all Keep traffic. Example:
<notextile>
-<pre><code class="userinput">Client:
- APIHost: zzzzz.arvadosapi.com
- AuthToken: zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
- KeepServiceURIs:
- - <b>http://127.0.0.1:25107</b>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ SLURM:
+ KeepServices:
+ 00000-bi6l4-000000000000000:
+ InternalURLs:
+ "http://127.0.0.1:25107": {}
</code></pre>
</notextile>
-h3(#PollPeriod). PollPeriod
+h3(#PollPeriod). Containers.PollInterval
-crunch-dispatch-slurm polls the API server periodically for new containers to run. The @PollPeriod@ option controls how often this poll happens. Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@. For example:
+crunch-dispatch-slurm polls the API server periodically for new containers to run. The @PollInterval@ option controls how often this poll happens. Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@. For example:
<notextile>
-<pre><code class="userinput">PollPeriod: <b>3m30s</b>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">PollInterval: <b>3m30s</b>
</code></pre>
</notextile>
@@ -90,31 +85,43 @@ crunch-dispatch-slurm adjusts the "nice" values of its SLURM jobs to ensure cont
The smallest usable value is @1@. The default value of @10@ is used if this option is zero or negative. Example:
<notextile>
-<pre><code class="userinput">PrioritySpread: <b>1000</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ SLURM:
+ <code class="userinput">PrioritySpread: <b>1000</b></code></pre>
</notextile>
-h3(#SbatchArguments). SbatchArguments
+h3(#SbatchArguments). Containers.SLURM.SbatchArgumentsList
When crunch-dispatch-slurm invokes @sbatch@, you can add arguments to the command by specifying @SbatchArguments@. You can use this to send the jobs to specific cluster partitions or add resource requests. Set @SbatchArguments@ to an array of strings. For example:
<notextile>
-<pre><code class="userinput">SbatchArguments:
-- <b>"--partition=PartitionName"</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ SLURM:
+ SbatchArgumentsList:
+ - <b>"--partition=PartitionName"</b>
+</pre>
</notextile>
Note: If an argument is supplied multiple times, @slurm@ uses the value of the last occurrence of the argument on the command line. Arguments specified through Arvados are added after the arguments listed in SbatchArguments. This means, for example, an Arvados container that specifies @partitions@ in @scheduling_parameter@ will override an occurrence of @--partition@ in SbatchArguments. As a result, for container parameters that can be specified through Arvados, SbatchArguments can be used to specify defaults but not enforce specific policy.
-h3(#CrunchRunCommand-cgroups). CrunchRunCommand: Dispatch to SLURM cgroups
+h3(#CrunchRunCommand-cgroups). Containers.CrunchRunArgumentsList: Dispatch to SLURM cgroups
If your SLURM cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside SLURM's cgroups. This provides consistent enforcement of resource constraints. To do this, use a crunch-dispatch-slurm configuration like the following:
<notextile>
-<pre><code class="userinput">CrunchRunCommand:
-- <b>crunch-run</b>
-- <b>"-cgroup-parent-subsystem=memory"</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">CrunchRunArgumentsList:
+ - <b>"-cgroup-parent-subsystem=memory"</b></code>
+</pre>
</notextile>
The choice of subsystem ("memory" in this example) must correspond to one of the resource types enabled in SLURM's @cgroup.conf@. Limits for other resource types will also be respected. The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by SLURM. When doing this, you should also set "ReserveExtraRAM":#ReserveExtraRAM .
@@ -127,34 +134,45 @@ You can work around this issue by disabling the Docker daemon's systemd integrat
{% include 'notebox_end' %}
-h3(#CrunchRunCommand-network). CrunchRunCommand: Using host networking for containers
+h3(#CrunchRunCommand-network). Containers.CrunchRunArgumentsList: Using host networking for containers
Older Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups. This is indicated by blocked kernel tasks in "Workqueue: netns cleanup_net". If you are experiencing this problem, as a workaround you can disable use of network namespaces by Docker across the cluster. Be aware this reduces container isolation, which may be a security risk.
<notextile>
-<pre><code class="userinput">CrunchRunCommand:
-- <b>crunch-run</b>
-- <b>"-container-enable-networking=always"</b>
-- <b>"-container-network-mode=host"</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">CrunchRunArgumentsList:
+ - <b>"-container-enable-networking=always"</b>
+ - <b>"-container-network-mode=host"</b></code>
+</pre>
</notextile>
-h3(#MinRetryPeriod). MinRetryPeriod: Rate-limit repeated attempts to start containers
+h3(#MinRetryPeriod). Containers.MinRetryPeriod: Rate-limit repeated attempts to start containers
If SLURM is unable to run a container, the dispatcher will submit it again after the next PollPeriod. If PollPeriod is very short, this can be excessive. If MinRetryPeriod is set, the dispatcher will avoid submitting the same container to SLURM more than once in the given time span.
<notextile>
-<pre><code class="userinput">MinRetryPeriod: <b>30s</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">MinRetryPeriod: <b>30s</b></code>
+</pre>
</notextile>
-h3(#ReserveExtraRAM). ReserveExtraRAM: Extra RAM for jobs
+h3(#ReserveExtraRAM). Containers.ReserveExtraRAM: Extra RAM for jobs
Extra RAM to reserve (in bytes) on each SLURM job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@. If not provided, the default value is zero. Helpful when using @-cgroup-parent-subsystem@, where @crunch-run@ and @arv-mount@ share the control group memory limit with the user process. In this situation, at least 256MiB is recommended to accommodate each container's @crunch-run@ and @arv-mount@ processes.
<notextile>
-<pre><code class="userinput">ReserveExtraRAM: <b>268435456</b>
-</code></pre>
+<pre>
+Clusters:
+ zzzzz:
+ Containers:
+ <code class="userinput">ReserveExtraRAM: <b>256MiB</b></code>
+</pre>
</notextile>
h2. Restart the dispatcher
diff --git a/doc/install/install-ws.html.textile.liquid b/doc/install/install-ws.html.textile.liquid
index daca48ec0..1c3b357df 100644
--- a/doc/install/install-ws.html.textile.liquid
+++ b/doc/install/install-ws.html.textile.liquid
@@ -51,7 +51,7 @@ Usage of arvados-ws:
h3. Update cluster config
-Edit the cluster config at @/etc/arvados/config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@. Replace @zzzzz@ with your cluster id. Assumes that the @Services.API@ and @PostgreSQL@ sections are already filled out.
+Edit the cluster config at @/etc/arvados/config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@. Replace @zzzzz@ with your cluster id. Expects that @SystemRootToken@, @Services.API@ and @PostgreSQL@ sections are already filled out.
<notextile>
<pre><code>
@@ -190,10 +190,10 @@ Restart Nginx to reload the API server configuration.
h3. Verify DNS and proxy setup
-Use a host elsewhere on the Internet to confirm that your DNS, proxy, and SSL are configured correctly.
+Use a host elsewhere on the Internet to confirm that your DNS, proxy, and SSL are configured correctly. For @Authorization: Bearer xxxx@, replace @xxxx@ with the value from @ManagementToken@ in @config.yml@.
<notextile>
-<pre><code>$ <span class="userinput">curl https://ws.<b>uuid_prefix.your.domain</b>/status.json</span>
-{"Clients":1}
+<pre><code>$ <span class="userinput">curl -H "Authorization: Bearer xxxx" https://ws.<b>uuid_prefix.your.domain</b>/_health/ping</span>
+{"health":"OK"}
</code></pre>
</notextile>
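The same probe is easy to script; a minimal Go sketch using only the standard library (the host and token are the doc's placeholders):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Substitute your websocket host and the ManagementToken value.
	req, err := http.NewRequest("GET", "https://ws.uuid_prefix.your.domain/_health/ping", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer xxxx")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // expect: 200 {"health":"OK"}
}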
commit 620e4c67667f612034d748f42dce58a113e15410
Author: Peter Amstutz <pamstutz@veritasgenetics.com>
Date: Wed Jul 31 14:08:01 2019 -0400
15467: Updating ws docs wip
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz@veritasgenetics.com>
diff --git a/doc/install/install-ws.html.textile.liquid b/doc/install/install-ws.html.textile.liquid
index f5f816cd7..daca48ec0 100644
--- a/doc/install/install-ws.html.textile.liquid
+++ b/doc/install/install-ws.html.textile.liquid
@@ -43,26 +43,27 @@ Verify that @arvados-ws@ is functional:
<pre><code>~$ <span class="userinput">arvados-ws -h</span>
Usage of arvados-ws:
-config path
- path to config file (default "/etc/arvados/ws/ws.yml")
+ path to config file (default "/etc/arvados/config.yml")
-dump-config
show current configuration and exit
</code></pre>
</notextile>
-h3. Create a configuration file
+h3. Update cluster config
-Create @/etc/arvados/ws/ws.yml@ using the following template. Replace @xxxxxxxx@ with the "password you generated during database setup":install-postgresql.html#api.
+Edit the cluster config at @/etc/arvados/config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@. Replace @zzzzz@ with your cluster id. Assumes that the @Services.API@ and @PostgreSQL@ sections are already filled out.
<notextile>
-<pre><code>Client:
- APIHost: <span class="userinput">uuid_prefix.your.domain</span>:443
-Listen: ":<span class="userinput">9003</span>"
-Postgres:
- dbname: arvados_production
- host: localhost
- password: <span class="userinput">xxxxxxxx</span>
- user: arvados
-</code></pre>
+<pre><code>
+Clusters:
+ zzzzz:
+ Services:
+ <span class="userinput">
+ Websocket:
+ ExternalURL: wss://ws.uuid_prefix.your.domain/websocket
+ InternalURLs:
+ "http://localhost:9003": {}
+</span></code></pre>
</notextile>
h3. Start the service (option 1: systemd)
@@ -180,13 +181,6 @@ If you are upgrading a cluster where Nginx is configured to proxy @ws@ requests
h3. Update API server configuration
-Ensure the websocket server address is correct in the API server configuration file @/etc/arvados/api/application.yml@.
-
-<notextile>
-<pre><code>websocket_address: wss://ws.<span class="userinput">uuid_prefix.your.domain</span>/websocket
-</code></pre>
-</notextile>
-
Restart Nginx to reload the API server configuration.
<notextile>
-----------------------------------------------------------------------
hooks/post-receive
--