[ARVADOS] created: 1.3.0-792-g86074c13f

Git user git at public.curoverse.com
Tue Apr 23 17:30:24 UTC 2019


        at  86074c13f4441fa0804e30a1d68781175ba32e0d (commit)


commit 86074c13f4441fa0804e30a1d68781175ba32e0d
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Apr 23 13:28:55 2019 -0400

    15003: Check DB conn in controller health check. Check test services.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

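The new /_health/ping route exercises the controller's database connection, and
the test harness now probes it (via the checkhealth helper below) for
controller, keep-web, and arv-git-httpd, using the ManagementToken configured
in run_test_server.py. A minimal Go sketch of the same probe; the port is a
placeholder (run-tests.sh reads the real one from $WORKSPACE/tmp/<service>.port)
and the token is the test harness's, not a production value:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        // Placeholder URL; substitute the service's actual port.
        req, err := http.NewRequest("GET", "http://localhost:8003/_health/ping", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Authorization", "Bearer e687950a23c3a9bceec28c6223a06c79")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := ioutil.ReadAll(resp.Body)
        // checkhealth greps the response body for "OK" to decide the service is healthy.
        fmt.Println(resp.StatusCode, string(body))
    }
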
diff --git a/build/run-tests.sh b/build/run-tests.sh
index 9690cbbb9..a33dab7c3 100755
--- a/build/run-tests.sh
+++ b/build/run-tests.sh
@@ -379,6 +379,20 @@ checkpidfile() {
     echo "${svc} pid ${pid} ok"
 }
 
+checkhealth() {
+    svc="$1"
+    port="$(cat "$WORKSPACE/tmp/${svc}.port")"
+    scheme=http
+    if [[ ${svc} =~ -ssl$ || ${svc} = wss ]]; then
+        scheme=https
+    fi
+    url="$scheme://localhost:${port}/_health/ping"
+    if ! curl -Ss -H "Authorization: Bearer e687950a23c3a9bceec28c6223a06c79" "${url}" | tee -a /dev/stderr | grep '"OK"'; then
+        echo "${url} failed"
+        return 1
+    fi
+}
+
 checkdiscoverydoc() {
     dd="https://${1}/discovery/v1/apis/arvados/v1/rest"
     if ! (set -o pipefail; curl -fsk "$dd" | grep -q ^{ ); then
@@ -412,12 +426,15 @@ start_services() {
         && checkdiscoverydoc $ARVADOS_API_HOST \
         && python sdk/python/tests/run_test_server.py start_controller \
         && checkpidfile controller \
+        && checkhealth controller \
         && python sdk/python/tests/run_test_server.py start_keep_proxy \
         && checkpidfile keepproxy \
         && python sdk/python/tests/run_test_server.py start_keep-web \
         && checkpidfile keep-web \
+        && checkhealth keep-web \
         && python sdk/python/tests/run_test_server.py start_arv-git-httpd \
         && checkpidfile arv-git-httpd \
+        && checkhealth arv-git-httpd \
         && python sdk/python/tests/run_test_server.py start_ws \
         && checkpidfile ws \
         && eval $(python sdk/python/tests/run_test_server.py start_nginx) \
diff --git a/lib/controller/handler.go b/lib/controller/handler.go
index 53125ae55..775d29034 100644
--- a/lib/controller/handler.go
+++ b/lib/controller/handler.go
@@ -72,6 +72,7 @@ func (h *Handler) setup() {
 	mux.Handle("/_health/", &health.Handler{
 		Token:  h.Cluster.ManagementToken,
 		Prefix: "/_health/",
+		Routes: health.Routes{"ping": func() error { _, err := h.db(&http.Request{}); return err }},
 	})
 	hs := http.NotFoundHandler()
 	hs = prepend(hs, h.proxyRailsAPI)
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
index 4607365c5..79767c2fa 100644
--- a/sdk/python/tests/run_test_server.py
+++ b/sdk/python/tests/run_test_server.py
@@ -413,6 +413,7 @@ def run_controller():
         f.write("""
 Clusters:
   zzzzz:
+    ManagementToken: e687950a23c3a9bceec28c6223a06c79
     HTTPRequestTimeout: 30s
     PostgreSQL:
       ConnectionPool: 32
@@ -632,6 +633,7 @@ def run_arv_git_httpd():
     agh = subprocess.Popen(
         ['arv-git-httpd',
          '-repo-root='+gitdir+'/test',
+         '-management-token=e687950a23c3a9bceec28c6223a06c79',
          '-address=:'+str(gitport)],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('arv-git-httpd'), 'w') as f:
@@ -657,6 +659,7 @@ def run_keep_web():
         ['keep-web',
          '-allow-anonymous',
          '-attachment-only-host=download',
+         '-management-token=e687950a23c3a9bceec28c6223a06c79',
          '-listen=:'+str(keepwebport)],
         env=env, stdin=open('/dev/null'), stdout=logf, stderr=logf)
     with open(_pidfile('keep-web'), 'w') as f:

commit 8a207287266e997b4e9b8d10a02ce68ce1cf7b69
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Apr 23 10:28:52 2019 -0400

    15003: Split out deprecated config stuff.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/lib/config/load.go b/lib/config/deprecated.go
similarity index 51%
copy from lib/config/load.go
copy to lib/config/deprecated.go
index 5a690b03c..5d6796ea1 100644
--- a/lib/config/load.go
+++ b/lib/config/deprecated.go
@@ -5,24 +5,14 @@
 package config
 
 import (
-	"bytes"
-	"encoding/json"
-	"errors"
 	"fmt"
-	"io"
-	"io/ioutil"
 	"os"
 	"strings"
 
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
 	"github.com/ghodss/yaml"
-	"github.com/imdario/mergo"
 )
 
-type logger interface {
-	Warnf(string, ...interface{})
-}
-
 type deprRequestLimits struct {
 	MaxItemsPerResponse            *int
 	MultiClusterRequestConcurrency *int
@@ -37,84 +27,12 @@ type deprecatedConfig struct {
 	Clusters map[string]deprCluster
 }
 
-func LoadFile(path string, log logger) (*arvados.Config, error) {
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	return Load(f, log)
-}
-
-func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
-	var cfg arvados.Config
-	buf, err := ioutil.ReadAll(rdr)
-	if err != nil {
-		return nil, err
-	}
-
-	// Load the config into a dummy map to get the cluster ID
-	// keys, discarding the values; then set up defaults for each
-	// cluster ID; then load the real config on top of the
-	// defaults.
-	var dummy struct {
-		Clusters map[string]struct{}
-	}
-	err = yaml.Unmarshal(buf, &dummy)
-	if err != nil {
-		return nil, err
-	}
-	if len(dummy.Clusters) == 0 {
-		return nil, errors.New("config does not define any clusters")
-	}
-
-	// We can't merge deep structs here; instead, we unmarshal the
-	// default & loaded config files into generic maps, merge
-	// those, and then json-encode+decode the result into the
-	// config struct type.
-	var merged map[string]interface{}
-	for id := range dummy.Clusters {
-		var src map[string]interface{}
-		err = yaml.Unmarshal(bytes.Replace(DefaultYAML, []byte(" xxxxx:"), []byte(" "+id+":"), -1), &src)
-		if err != nil {
-			return nil, fmt.Errorf("loading defaults for %s: %s", id, err)
-		}
-		mergo.Merge(&merged, src, mergo.WithOverride)
-	}
-	var src map[string]interface{}
-	err = yaml.Unmarshal(buf, &src)
-	if err != nil {
-		return nil, fmt.Errorf("loading config data: %s", err)
-	}
-	mergo.Merge(&merged, src, mergo.WithOverride)
-
-	var errEnc error
-	pr, pw := io.Pipe()
-	go func() {
-		errEnc = json.NewEncoder(pw).Encode(merged)
-		pw.Close()
-	}()
-	err = json.NewDecoder(pr).Decode(&cfg)
-	if errEnc != nil {
-		err = errEnc
-	}
-	if err != nil {
-		return nil, fmt.Errorf("transcoding config data: %s", err)
-	}
-
+func applyDeprecatedConfig(cfg *arvados.Config, configdata []byte, log logger) error {
 	var dc deprecatedConfig
-	err = yaml.Unmarshal(buf, &dc)
-	if err != nil {
-		return nil, err
-	}
-	err = applyDeprecatedConfig(&cfg, &dc, log)
+	err := yaml.Unmarshal(configdata, &dc)
 	if err != nil {
-		return nil, err
+		return err
 	}
-	return &cfg, nil
-}
-
-func applyDeprecatedConfig(cfg *arvados.Config, dc *deprecatedConfig, log logger) error {
 	hostname, err := os.Hostname()
 	if err != nil {
 		return err
diff --git a/lib/config/deprecated_test.go b/lib/config/deprecated_test.go
new file mode 100644
index 000000000..bdce5b542
--- /dev/null
+++ b/lib/config/deprecated_test.go
@@ -0,0 +1,51 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+	"os"
+
+	check "gopkg.in/check.v1"
+)
+
+func (s *LoadSuite) TestDeprecatedNodeProfilesToServices(c *check.C) {
+	hostname, err := os.Hostname()
+	c.Assert(err, check.IsNil)
+	s.checkEquivalent(c, `
+Clusters:
+ z1111:
+  NodeProfiles:
+   "*":
+    arvados-dispatch-cloud:
+     listen: ":9006"
+    arvados-controller:
+     listen: ":9004"
+   `+hostname+`:
+    arvados-api-server:
+     listen: ":8000"
+`, `
+Clusters:
+ z1111:
+  Services:
+   RailsAPI:
+    InternalURLs:
+     "http://`+hostname+`:8000": {}
+   Controller:
+    InternalURLs:
+     "http://`+hostname+`:9004": {}
+   DispatchCloud:
+    InternalURLs:
+     "http://`+hostname+`:9006": {}
+  NodeProfiles:
+   "*":
+    arvados-dispatch-cloud:
+     listen: ":9006"
+    arvados-controller:
+     listen: ":9004"
+   `+hostname+`:
+    arvados-api-server:
+     listen: ":8000"
+`)
+}
diff --git a/lib/config/load.go b/lib/config/load.go
index 5a690b03c..e9ae5c6d1 100644
--- a/lib/config/load.go
+++ b/lib/config/load.go
@@ -12,7 +12,6 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"strings"
 
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
 	"github.com/ghodss/yaml"
@@ -23,20 +22,6 @@ type logger interface {
 	Warnf(string, ...interface{})
 }
 
-type deprRequestLimits struct {
-	MaxItemsPerResponse            *int
-	MultiClusterRequestConcurrency *int
-}
-
-type deprCluster struct {
-	RequestLimits deprRequestLimits
-	NodeProfiles  map[string]arvados.NodeProfile
-}
-
-type deprecatedConfig struct {
-	Clusters map[string]deprCluster
-}
-
 func LoadFile(path string, log logger) (*arvados.Config, error) {
 	f, err := os.Open(path)
 	if err != nil {
@@ -102,62 +87,9 @@ func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
 		return nil, fmt.Errorf("transcoding config data: %s", err)
 	}
 
-	var dc deprecatedConfig
-	err = yaml.Unmarshal(buf, &dc)
-	if err != nil {
-		return nil, err
-	}
-	err = applyDeprecatedConfig(&cfg, &dc, log)
+	err = applyDeprecatedConfig(&cfg, buf, log)
 	if err != nil {
 		return nil, err
 	}
 	return &cfg, nil
 }
-
-func applyDeprecatedConfig(cfg *arvados.Config, dc *deprecatedConfig, log logger) error {
-	hostname, err := os.Hostname()
-	if err != nil {
-		return err
-	}
-	for id, dcluster := range dc.Clusters {
-		cluster, ok := cfg.Clusters[id]
-		if !ok {
-			return fmt.Errorf("can't load legacy config %q that is not present in current config", id)
-		}
-		for name, np := range dcluster.NodeProfiles {
-			if name == "*" || name == os.Getenv("ARVADOS_NODE_PROFILE") || name == hostname {
-				applyDeprecatedNodeProfile(hostname, np.RailsAPI, &cluster.Services.RailsAPI)
-				applyDeprecatedNodeProfile(hostname, np.Controller, &cluster.Services.Controller)
-				applyDeprecatedNodeProfile(hostname, np.DispatchCloud, &cluster.Services.DispatchCloud)
-			}
-		}
-		if dst, n := &cluster.API.MaxItemsPerResponse, dcluster.RequestLimits.MaxItemsPerResponse; n != nil && *n != *dst {
-			log.Warnf("overriding Clusters.%s.API.MaxItemsPerResponse with deprecated config RequestLimits.MaxItemsPerResponse = %d", id, *n)
-			*dst = *n
-		}
-		if dst, n := &cluster.API.MaxRequestAmplification, dcluster.RequestLimits.MultiClusterRequestConcurrency; n != nil && *n != *dst {
-			log.Warnf("overriding Clusters.%s.API.MaxRequestAmplification with deprecated config RequestLimits.MultiClusterRequestConcurrency = %d", id, *n)
-			*dst = *n
-		}
-		cfg.Clusters[id] = cluster
-	}
-	return nil
-}
-
-func applyDeprecatedNodeProfile(hostname string, ssi arvados.SystemServiceInstance, svc *arvados.Service) {
-	scheme := "https"
-	if !ssi.TLS {
-		scheme = "http"
-	}
-	if svc.InternalURLs == nil {
-		svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
-	}
-	host := ssi.Listen
-	if host == "" {
-		return
-	}
-	if strings.HasPrefix(host, ":") {
-		host = hostname + host
-	}
-	svc.InternalURLs[arvados.URL{Scheme: scheme, Host: host}] = arvados.ServiceInstance{}
-}
diff --git a/lib/config/load_test.go b/lib/config/load_test.go
index 277ff423a..315e021eb 100644
--- a/lib/config/load_test.go
+++ b/lib/config/load_test.go
@@ -53,46 +53,6 @@ func (s *LoadSuite) TestMultipleClusters(c *check.C) {
 	c.Check(c2.ClusterID, check.Equals, "z2222")
 }
 
-func (s *LoadSuite) TestNodeProfilesToServices(c *check.C) {
-	hostname, err := os.Hostname()
-	c.Assert(err, check.IsNil)
-	s.checkEquivalent(c, `
-Clusters:
- z1111:
-  NodeProfiles:
-   "*":
-    arvados-dispatch-cloud:
-     listen: ":9006"
-    arvados-controller:
-     listen: ":9004"
-   `+hostname+`:
-    arvados-api-server:
-     listen: ":8000"
-`, `
-Clusters:
- z1111:
-  Services:
-   RailsAPI:
-    InternalURLs:
-     "http://`+hostname+`:8000": {}
-   Controller:
-    InternalURLs:
-     "http://`+hostname+`:9004": {}
-   DispatchCloud:
-    InternalURLs:
-     "http://`+hostname+`:9006": {}
-  NodeProfiles:
-   "*":
-    arvados-dispatch-cloud:
-     listen: ":9006"
-    arvados-controller:
-     listen: ":9004"
-   `+hostname+`:
-    arvados-api-server:
-     listen: ":8000"
-`)
-}
-
 func (s *LoadSuite) TestMovedKeys(c *check.C) {
 	s.checkEquivalent(c, `# config has old keys only
 Clusters:

commit 889c4f35d4b74d7c6f043790b04104fec9d8e37b
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Apr 23 09:44:54 2019 -0400

    15003: Move keys from RequestLimits to API section.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

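Besides moving the keys, this commit replaces the hand-rolled channel
semaphores in the federation handlers with a small acquire/release helper
(lib/controller/semaphore.go below), sized by the new
API.MaxRequestAmplification setting. A sketch of the usage pattern; doWork is
a hypothetical stand-in for one outgoing federated request:

    package main

    import (
        "fmt"
        "sync"
    )

    // Same shape as the new lib/controller helper: a buffered channel used as
    // a counting semaphore; max <= 0 means no limit.
    func semaphore(max int) (acquire, release func()) {
        if max > 0 {
            ch := make(chan bool, max)
            return func() { ch <- true }, func() { <-ch }
        }
        return func() {}, func() {}
    }

    // doWork is a hypothetical stand-in for one remote request.
    func doWork(id int) { fmt.Println("request", id) }

    func main() {
        acquire, release := semaphore(4) // 4 = default API.MaxRequestAmplification
        var wg sync.WaitGroup
        for i := 0; i < 20; i++ {
            acquire()
            wg.Add(1)
            go func(i int) {
                defer release()
                defer wg.Done()
                doWork(i)
            }(i)
        }
        wg.Wait()
    }
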
diff --git a/lib/config/cmd_test.go b/lib/config/cmd_test.go
index 0a60c25b5..39dcb4fe6 100644
--- a/lib/config/cmd_test.go
+++ b/lib/config/cmd_test.go
@@ -28,6 +28,19 @@ func (s *CommandSuite) TestEmptyInput(c *check.C) {
 	c.Check(stderr.String(), check.Matches, `config does not define any clusters\n`)
 }
 
+func (s *CommandSuite) TestLogDeprecatedKeys(c *check.C) {
+	var stdout, stderr bytes.Buffer
+	in := `
+Clusters:
+ z1234:
+  RequestLimits:
+    MaxItemsPerResponse: 1234
+`
+	code := DumpCommand.RunCommand("arvados dump-config", nil, bytes.NewBufferString(in), &stdout, &stderr)
+	c.Check(code, check.Equals, 0)
+	c.Check(stderr.String(), check.Matches, `(?ms).*overriding Clusters.z1234.API.MaxItemsPerResponse .* = 1234.*`)
+}
+
 func (s *CommandSuite) TestUnknownKey(c *check.C) {
 	var stdout, stderr bytes.Buffer
 	in := `
@@ -38,6 +51,7 @@ Clusters:
 `
 	code := DumpCommand.RunCommand("arvados dump-config", nil, bytes.NewBufferString(in), &stdout, &stderr)
 	c.Check(code, check.Equals, 0)
+	c.Check(stderr.String(), check.Equals, "")
 	c.Check(stdout.String(), check.Matches, `(?ms)Clusters:\n  z1234:\n.*`)
 	c.Check(stdout.String(), check.Matches, `(?ms).*\n *ManagementToken: secret\n.*`)
 	c.Check(stdout.String(), check.Not(check.Matches), `(?ms).*UnknownKey.*`)
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index bea638753..c767f7613 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -70,7 +70,7 @@ Clusters:
         # All parameters here are passed to the PG client library in a connection string;
         # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
         Host: ""
-        Port: 0
+        Port: ""
         User: ""
         Password: ""
         DBName: ""
@@ -109,6 +109,10 @@ Clusters:
       # update on the permission view in the future, if not already scheduled.
       AsyncPermissionsUpdateInterval: 20
 
+      # Maximum number of concurrent outgoing requests to make while
+      # serving a single incoming multi-cluster (federated) request.
+      MaxRequestAmplification: 4
+
       # RailsSessionSecretToken is a string of alphanumeric characters
       # used by Rails to sign session tokens. IMPORTANT: This is a
       # site secret. It should be at least 50 characters.
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index a24a9055f..3c16e8955 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -76,7 +76,7 @@ Clusters:
         # All parameters here are passed to the PG client library in a connection string;
         # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
         Host: ""
-        Port: 0
+        Port: ""
         User: ""
         Password: ""
         DBName: ""
@@ -115,6 +115,15 @@ Clusters:
       # update on the permission view in the future, if not already scheduled.
       AsyncPermissionsUpdateInterval: 20
 
+      # Maximum number of concurrent outgoing requests to make while
+      # serving a single incoming multi-cluster (federated) request.
+      MaxRequestAmplification: 4
+
+      # RailsSessionSecretToken is a string of alphanumeric characters
+      # used by Rails to sign session tokens. IMPORTANT: This is a
+      # site secret. It should be at least 50 characters.
+      RailsSessionSecretToken: ""
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # these users will be able to self-activate.  Enable this if you want
diff --git a/lib/config/load.go b/lib/config/load.go
index 159dd65dc..5a690b03c 100644
--- a/lib/config/load.go
+++ b/lib/config/load.go
@@ -6,6 +6,7 @@ package config
 
 import (
 	"bytes"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -15,16 +16,25 @@ import (
 
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
 	"github.com/ghodss/yaml"
+	"github.com/imdario/mergo"
 )
 
 type logger interface {
 	Warnf(string, ...interface{})
 }
 
+type deprRequestLimits struct {
+	MaxItemsPerResponse            *int
+	MultiClusterRequestConcurrency *int
+}
+
+type deprCluster struct {
+	RequestLimits deprRequestLimits
+	NodeProfiles  map[string]arvados.NodeProfile
+}
+
 type deprecatedConfig struct {
-	Clusters map[string]struct {
-		NodeProfiles map[string]arvados.NodeProfile
-	}
+	Clusters map[string]deprCluster
 }
 
 func LoadFile(path string, log logger) (*arvados.Config, error) {
@@ -57,18 +67,41 @@ func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
 	if len(dummy.Clusters) == 0 {
 		return nil, errors.New("config does not define any clusters")
 	}
+
+	// We can't merge deep structs here; instead, we unmarshal the
+	// default & loaded config files into generic maps, merge
+	// those, and then json-encode+decode the result into the
+	// config struct type.
+	var merged map[string]interface{}
 	for id := range dummy.Clusters {
-		err = yaml.Unmarshal(bytes.Replace(DefaultYAML, []byte("xxxxx"), []byte(id), -1), &cfg)
+		var src map[string]interface{}
+		err = yaml.Unmarshal(bytes.Replace(DefaultYAML, []byte(" xxxxx:"), []byte(" "+id+":"), -1), &src)
 		if err != nil {
 			return nil, fmt.Errorf("loading defaults for %s: %s", id, err)
 		}
+		mergo.Merge(&merged, src, mergo.WithOverride)
 	}
-	err = yaml.Unmarshal(buf, &cfg)
+	var src map[string]interface{}
+	err = yaml.Unmarshal(buf, &src)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("loading config data: %s", err)
+	}
+	mergo.Merge(&merged, src, mergo.WithOverride)
+
+	var errEnc error
+	pr, pw := io.Pipe()
+	go func() {
+		errEnc = json.NewEncoder(pw).Encode(merged)
+		pw.Close()
+	}()
+	err = json.NewDecoder(pr).Decode(&cfg)
+	if errEnc != nil {
+		err = errEnc
+	}
+	if err != nil {
+		return nil, fmt.Errorf("transcoding config data: %s", err)
 	}
 
-	// Check for deprecated config values, and apply them to cfg.
 	var dc deprecatedConfig
 	err = yaml.Unmarshal(buf, &dc)
 	if err != nil {
@@ -98,6 +131,14 @@ func applyDeprecatedConfig(cfg *arvados.Config, dc *deprecatedConfig, log logger
 				applyDeprecatedNodeProfile(hostname, np.DispatchCloud, &cluster.Services.DispatchCloud)
 			}
 		}
+		if dst, n := &cluster.API.MaxItemsPerResponse, dcluster.RequestLimits.MaxItemsPerResponse; n != nil && *n != *dst {
+			log.Warnf("overriding Clusters.%s.API.MaxItemsPerResponse with deprecated config RequestLimits.MaxItemsPerResponse = %d", id, *n)
+			*dst = *n
+		}
+		if dst, n := &cluster.API.MaxRequestAmplification, dcluster.RequestLimits.MultiClusterRequestConcurrency; n != nil && *n != *dst {
+			log.Warnf("overriding Clusters.%s.API.MaxRequestAmplification with deprecated config RequestLimits.MultiClusterRequestConcurrency = %d", id, *n)
+			*dst = *n
+		}
 		cfg.Clusters[id] = cluster
 	}
 	return nil
diff --git a/lib/config/load_test.go b/lib/config/load_test.go
index f00ce33fd..277ff423a 100644
--- a/lib/config/load_test.go
+++ b/lib/config/load_test.go
@@ -38,6 +38,8 @@ func (s *LoadSuite) TestNoConfigs(c *check.C) {
 	cc, err := cfg.GetCluster("z1111")
 	c.Assert(err, check.IsNil)
 	c.Check(cc.ClusterID, check.Equals, "z1111")
+	c.Check(cc.API.MaxRequestAmplification, check.Equals, 4)
+	c.Check(cc.API.MaxItemsPerResponse, check.Equals, 1000)
 }
 
 func (s *LoadSuite) TestMultipleClusters(c *check.C) {
@@ -91,14 +93,46 @@ Clusters:
 `)
 }
 
+func (s *LoadSuite) TestMovedKeys(c *check.C) {
+	s.checkEquivalent(c, `# config has old keys only
+Clusters:
+ zzzzz:
+  RequestLimits:
+   MultiClusterRequestConcurrency: 3
+   MaxItemsPerResponse: 999
+`, `
+Clusters:
+ zzzzz:
+  API:
+   MaxRequestAmplification: 3
+   MaxItemsPerResponse: 999
+`)
+	s.checkEquivalent(c, `# config has both old and new keys; old values win
+Clusters:
+ zzzzz:
+  RequestLimits:
+   MultiClusterRequestConcurrency: 0
+   MaxItemsPerResponse: 555
+  API:
+   MaxRequestAmplification: 3
+   MaxItemsPerResponse: 999
+`, `
+Clusters:
+ zzzzz:
+  API:
+   MaxRequestAmplification: 0
+   MaxItemsPerResponse: 555
+`)
+}
+
 func (s *LoadSuite) checkEquivalent(c *check.C, goty, expectedy string) {
 	got, err := Load(bytes.NewBufferString(goty), ctxlog.TestLogger(c))
 	c.Assert(err, check.IsNil)
 	expected, err := Load(bytes.NewBufferString(expectedy), ctxlog.TestLogger(c))
 	c.Assert(err, check.IsNil)
 	if !c.Check(got, check.DeepEquals, expected) {
-		cmd := exec.Command("diff", "-u", "--label", "got", "--label", "expected", "/dev/fd/3", "/dev/fd/4")
-		for _, obj := range []interface{}{got, expected} {
+		cmd := exec.Command("diff", "-u", "--label", "expected", "--label", "got", "/dev/fd/3", "/dev/fd/4")
+		for _, obj := range []interface{}{expected, got} {
 			y, _ := yaml.Marshal(obj)
 			pr, pw, err := os.Pipe()
 			c.Assert(err, check.IsNil)
diff --git a/lib/controller/fed_collections.go b/lib/controller/fed_collections.go
index ab49e39d1..07daf2f90 100644
--- a/lib/controller/fed_collections.go
+++ b/lib/controller/fed_collections.go
@@ -217,17 +217,15 @@ func fetchRemoteCollectionByPDH(
 	// returned to the client.  When that happens, all
 	// other outstanding requests are cancelled
 	sharedContext, cancelFunc := context.WithCancel(req.Context())
+	defer cancelFunc()
+
 	req = req.WithContext(sharedContext)
 	wg := sync.WaitGroup{}
 	pdh := m[1]
 	success := make(chan *http.Response)
 	errorChan := make(chan error, len(h.handler.Cluster.RemoteClusters))
 
-	// use channel as a semaphore to limit the number of concurrent
-	// requests at a time
-	sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
-
-	defer cancelFunc()
+	acquire, release := semaphore(h.handler.Cluster.API.MaxRequestAmplification)
 
 	for remoteID := range h.handler.Cluster.RemoteClusters {
 		if remoteID == h.handler.Cluster.ClusterID {
@@ -238,9 +236,8 @@ func fetchRemoteCollectionByPDH(
 		wg.Add(1)
 		go func(remote string) {
 			defer wg.Done()
-			// blocks until it can put a value into the
-			// channel (which has a max queue capacity)
-			sem <- true
+			acquire()
+			defer release()
 			select {
 			case <-sharedContext.Done():
 				return
@@ -278,7 +275,6 @@ func fetchRemoteCollectionByPDH(
 			case success <- newResponse:
 				wasSuccess = true
 			}
-			<-sem
 		}(remoteID)
 	}
 	go func() {
diff --git a/lib/controller/fed_generic.go b/lib/controller/fed_generic.go
index 9c8b1614b..fd2fbc226 100644
--- a/lib/controller/fed_generic.go
+++ b/lib/controller/fed_generic.go
@@ -175,9 +175,9 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
 		httpserver.Error(w, "Federated multi-object may not provide 'limit', 'offset' or 'order'.", http.StatusBadRequest)
 		return true
 	}
-	if expectCount > h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse() {
+	if max := h.handler.Cluster.API.MaxItemsPerResponse; expectCount > max {
 		httpserver.Error(w, fmt.Sprintf("Federated multi-object request for %v objects which is more than max page size %v.",
-			expectCount, h.handler.Cluster.RequestLimits.GetMaxItemsPerResponse()), http.StatusBadRequest)
+			expectCount, max), http.StatusBadRequest)
 		return true
 	}
 	if req.Form.Get("select") != "" {
@@ -203,10 +203,7 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
 
 	// Perform concurrent requests to each cluster
 
-	// use channel as a semaphore to limit the number of concurrent
-	// requests at a time
-	sem := make(chan bool, h.handler.Cluster.RequestLimits.GetMultiClusterRequestConcurrency())
-	defer close(sem)
+	acquire, release := semaphore(h.handler.Cluster.API.MaxRequestAmplification)
 	wg := sync.WaitGroup{}
 
 	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
@@ -220,23 +217,20 @@ func (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.Response
 			// Nothing to query
 			continue
 		}
-
-		// blocks until it can put a value into the
-		// channel (which has a max queue capacity)
-		sem <- true
+		acquire()
 		wg.Add(1)
 		go func(k string, v []string) {
+			defer release()
+			defer wg.Done()
 			rp, kn, err := h.remoteQueryUUIDs(w, req, k, v)
 			mtx.Lock()
+			defer mtx.Unlock()
 			if err == nil {
 				completeResponses = append(completeResponses, rp...)
 				kind = kn
 			} else {
 				errors = append(errors, err)
 			}
-			mtx.Unlock()
-			wg.Done()
-			<-sem
 		}(k, v)
 	}
 	wg.Wait()
diff --git a/lib/controller/federation_test.go b/lib/controller/federation_test.go
index 62916acd2..c4aa33c15 100644
--- a/lib/controller/federation_test.go
+++ b/lib/controller/federation_test.go
@@ -64,9 +64,9 @@ func (s *FederationSuite) SetUpTest(c *check.C) {
 		NodeProfiles: map[string]arvados.NodeProfile{
 			"*": nodeProfile,
 		},
-		RequestLimits: arvados.RequestLimits{
-			MaxItemsPerResponse:            1000,
-			MultiClusterRequestConcurrency: 4,
+		API: arvados.API{
+			MaxItemsPerResponse:     1000,
+			MaxRequestAmplification: 4,
 		},
 	}, NodeProfile: &nodeProfile}
 	s.testServer = newServerFromIntegrationTestEnv(c)
@@ -850,7 +850,7 @@ func (s *FederationSuite) TestListMultiRemoteContainersMissing(c *check.C) {
 }
 
 func (s *FederationSuite) TestListMultiRemoteContainerPageSizeError(c *check.C) {
-	s.testHandler.Cluster.RequestLimits.MaxItemsPerResponse = 1
+	s.testHandler.Cluster.API.MaxItemsPerResponse = 1
 	req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
 		url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
 			arvadostest.QueuedContainerUUID))),
diff --git a/lib/controller/semaphore.go b/lib/controller/semaphore.go
new file mode 100644
index 000000000..ff607bbb5
--- /dev/null
+++ b/lib/controller/semaphore.go
@@ -0,0 +1,14 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package controller
+
+func semaphore(max int) (acquire, release func()) {
+	if max > 0 {
+		ch := make(chan bool, max)
+		return func() { ch <- true }, func() { <-ch }
+	} else {
+		return func() {}, func() {}
+	}
+}
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 32763acd1..610a3d288 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -51,9 +51,9 @@ func (sc *Config) GetCluster(clusterID string) (*Cluster, error) {
 	}
 }
 
-type RequestLimits struct {
-	MaxItemsPerResponse            int
-	MultiClusterRequestConcurrency int
+type API struct {
+	MaxItemsPerResponse     int
+	MaxRequestAmplification int
 }
 
 type Cluster struct {
@@ -68,7 +68,7 @@ type Cluster struct {
 	HTTPRequestTimeout Duration
 	RemoteClusters     map[string]RemoteCluster
 	PostgreSQL         PostgreSQL
-	RequestLimits      RequestLimits
+	API                API
 	Logging            Logging
 	TLS                TLS
 }
@@ -332,20 +332,6 @@ func (np *NodeProfile) ServicePorts() map[ServiceName]string {
 	}
 }
 
-func (h RequestLimits) GetMultiClusterRequestConcurrency() int {
-	if h.MultiClusterRequestConcurrency == 0 {
-		return 4
-	}
-	return h.MultiClusterRequestConcurrency
-}
-
-func (h RequestLimits) GetMaxItemsPerResponse() int {
-	if h.MaxItemsPerResponse == 0 {
-		return 1000
-	}
-	return h.MaxItemsPerResponse
-}
-
 type SystemServiceInstance struct {
 	Listen   string
 	TLS      bool
diff --git a/sdk/go/arvados/postgresql.go b/sdk/go/arvados/postgresql.go
index 47953ce9d..1969441da 100644
--- a/sdk/go/arvados/postgresql.go
+++ b/sdk/go/arvados/postgresql.go
@@ -9,6 +9,9 @@ import "strings"
 func (c PostgreSQLConnection) String() string {
 	s := ""
 	for k, v := range c {
+		if v == "" {
+			continue
+		}
 		s += strings.ToLower(k)
 		s += "='"
 		s += strings.Replace(
diff --git a/services/api/config/arvados_config.rb b/services/api/config/arvados_config.rb
index 669beb16e..cb76b68dc 100644
--- a/services/api/config/arvados_config.rb
+++ b/services/api/config/arvados_config.rb
@@ -172,7 +172,7 @@ dbcfg = ConfigLoader.new
 
 dbcfg.declare_config "PostgreSQL.ConnectionPool", Integer, :pool
 dbcfg.declare_config "PostgreSQL.Connection.Host", String, :host
-dbcfg.declare_config "PostgreSQL.Connection.Port", Integer, :port
+dbcfg.declare_config "PostgreSQL.Connection.Port", String, :port
 dbcfg.declare_config "PostgreSQL.Connection.User", String, :username
 dbcfg.declare_config "PostgreSQL.Connection.Password", String, :password
 dbcfg.declare_config "PostgreSQL.Connection.DBName", String, :database
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 5e2ed2e32..cfcba1b21 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -523,6 +523,12 @@
 			"revisionTime": "2016-08-13T22:13:03Z"
 		},
 		{
+			"checksumSHA1": "x7IEwuVYTztOJItr3jtePGyFDWA=",
+			"path": "github.com/imdario/mergo",
+			"revision": "5ef87b449ca75fbed1bc3765b749ca8f73f1fa69",
+			"revisionTime": "2019-04-15T13:31:43Z"
+		},
+		{
 			"checksumSHA1": "iCsyavJDnXC9OY//p52IWJWy7PY=",
 			"path": "github.com/jbenet/go-context/io",
 			"revision": "d14ea06fba99483203c19d92cfcd13ebe73135f4",

commit 6a08de4e6d04ed9706ae5f6502c9ff3d26a8bc0e
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Apr 16 16:12:12 2019 -0400

    15003: Use config lib for arvados-server.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

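arvados-server now loads its configuration through the new config package
(config.LoadFile) instead of arvados.GetConfig. A sketch of the call pattern;
the path and cluster ID are placeholders:

    package main

    import (
        "fmt"
        "log"
        "os"

        "git.curoverse.com/arvados.git/lib/config"
        "git.curoverse.com/arvados.git/sdk/go/ctxlog"
    )

    func main() {
        logger := ctxlog.New(os.Stderr, "text", "info")
        cfg, err := config.LoadFile("/etc/arvados/config.yml", logger) // placeholder path
        if err != nil {
            log.Fatal(err)
        }
        cluster, err := cfg.GetCluster("zzzzz") // placeholder cluster ID
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(cluster.ClusterID)
    }
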
diff --git a/lib/config/load.go b/lib/config/load.go
index e16a6e4b7..159dd65dc 100644
--- a/lib/config/load.go
+++ b/lib/config/load.go
@@ -27,6 +27,15 @@ type deprecatedConfig struct {
 	}
 }
 
+func LoadFile(path string, log logger) (*arvados.Config, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return Load(f, log)
+}
+
 func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
 	var cfg arvados.Config
 	buf, err := ioutil.ReadAll(rdr)
diff --git a/lib/service/cmd.go b/lib/service/cmd.go
index e853da943..4b7341d72 100644
--- a/lib/service/cmd.go
+++ b/lib/service/cmd.go
@@ -15,6 +15,7 @@ import (
 	"os"
 
 	"git.curoverse.com/arvados.git/lib/cmd"
+	"git.curoverse.com/arvados.git/lib/config"
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
 	"git.curoverse.com/arvados.git/sdk/go/ctxlog"
 	"git.curoverse.com/arvados.git/sdk/go/httpserver"
@@ -69,7 +70,7 @@ func (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout
 	} else if err != nil {
 		return 2
 	}
-	cfg, err := arvados.GetConfig(*configFile)
+	cfg, err := config.LoadFile(*configFile, log)
 	if err != nil {
 		return 1
 	}
diff --git a/sdk/python/tests/run_test_server.py b/sdk/python/tests/run_test_server.py
index 6687ca491..4607365c5 100644
--- a/sdk/python/tests/run_test_server.py
+++ b/sdk/python/tests/run_test_server.py
@@ -417,10 +417,10 @@ Clusters:
     PostgreSQL:
       ConnectionPool: 32
       Connection:
-        host: {}
-        dbname: {}
-        user: {}
-        password: {}
+        Host: {}
+        DBName: {}
+        User: {}
+        Password: {}
     NodeProfiles:
       "*":
         "arvados-controller":

commit 50d3c3897f0fad1bfcc4fc86096155d15d25483e
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Apr 16 15:33:33 2019 -0400

    15003: Add 'arvados-server dump-config' command.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

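dump-config reads a (possibly minimal) cluster config on stdin and writes the
fully merged result on stdout, i.e. "arvados-server dump-config
<config-src.yaml >config-min.yaml". A sketch of driving the handler directly,
the way the new tests do; the input YAML is illustrative only:

    package main

    import (
        "bytes"
        "os"

        "git.curoverse.com/arvados.git/lib/config"
    )

    func main() {
        // Illustrative input: one cluster with a single explicit setting.
        in := "Clusters:\n z1234:\n  ManagementToken: secret\n"
        code := config.DumpCommand.RunCommand("arvados-server dump-config", nil,
            bytes.NewBufferString(in), os.Stdout, os.Stderr)
        os.Exit(code)
    }
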
diff --git a/cmd/arvados-server/cmd.go b/cmd/arvados-server/cmd.go
index cd15d25dd..cad540c91 100644
--- a/cmd/arvados-server/cmd.go
+++ b/cmd/arvados-server/cmd.go
@@ -8,6 +8,7 @@ import (
 	"os"
 
 	"git.curoverse.com/arvados.git/lib/cmd"
+	"git.curoverse.com/arvados.git/lib/config"
 	"git.curoverse.com/arvados.git/lib/controller"
 	"git.curoverse.com/arvados.git/lib/dispatchcloud"
 )
@@ -21,6 +22,7 @@ var (
 
 		"controller":     controller.Command,
 		"dispatch-cloud": dispatchcloud.Command,
+		"dump-config":    config.DumpCommand,
 	})
 )
 
diff --git a/lib/config/cmd.go b/lib/config/cmd.go
new file mode 100644
index 000000000..aa3e3bca1
--- /dev/null
+++ b/lib/config/cmd.go
@@ -0,0 +1,45 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+	"fmt"
+	"io"
+
+	"git.curoverse.com/arvados.git/lib/cmd"
+	"git.curoverse.com/arvados.git/sdk/go/ctxlog"
+	"github.com/ghodss/yaml"
+)
+
+var DumpCommand cmd.Handler = dumpCommand{}
+
+type dumpCommand struct{}
+
+func (dumpCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {
+	var err error
+	defer func() {
+		if err != nil {
+			fmt.Fprintf(stderr, "%s\n", err)
+		}
+	}()
+	if len(args) != 0 {
+		err = fmt.Errorf("usage: %s <config-src.yaml >config-min.yaml", prog)
+		return 2
+	}
+	log := ctxlog.New(stderr, "text", "info")
+	cfg, err := Load(stdin, log)
+	if err != nil {
+		return 1
+	}
+	out, err := yaml.Marshal(cfg)
+	if err != nil {
+		return 1
+	}
+	_, err = stdout.Write(out)
+	if err != nil {
+		return 1
+	}
+	return 0
+}
diff --git a/lib/config/cmd_test.go b/lib/config/cmd_test.go
new file mode 100644
index 000000000..0a60c25b5
--- /dev/null
+++ b/lib/config/cmd_test.go
@@ -0,0 +1,44 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+	"bytes"
+
+	check "gopkg.in/check.v1"
+)
+
+var _ = check.Suite(&CommandSuite{})
+
+type CommandSuite struct{}
+
+func (s *CommandSuite) TestBadArg(c *check.C) {
+	var stderr bytes.Buffer
+	code := DumpCommand.RunCommand("arvados dump-config", []string{"-badarg"}, bytes.NewBuffer(nil), bytes.NewBuffer(nil), &stderr)
+	c.Check(code, check.Equals, 2)
+	c.Check(stderr.String(), check.Matches, `(?ms)usage: .*`)
+}
+
+func (s *CommandSuite) TestEmptyInput(c *check.C) {
+	var stdout, stderr bytes.Buffer
+	code := DumpCommand.RunCommand("arvados dump-config", nil, &bytes.Buffer{}, &stdout, &stderr)
+	c.Check(code, check.Equals, 1)
+	c.Check(stderr.String(), check.Matches, `config does not define any clusters\n`)
+}
+
+func (s *CommandSuite) TestUnknownKey(c *check.C) {
+	var stdout, stderr bytes.Buffer
+	in := `
+Clusters:
+ z1234:
+  UnknownKey: foobar
+  ManagementToken: secret
+`
+	code := DumpCommand.RunCommand("arvados dump-config", nil, bytes.NewBufferString(in), &stdout, &stderr)
+	c.Check(code, check.Equals, 0)
+	c.Check(stdout.String(), check.Matches, `(?ms)Clusters:\n  z1234:\n.*`)
+	c.Check(stdout.String(), check.Matches, `(?ms).*\n *ManagementToken: secret\n.*`)
+	c.Check(stdout.String(), check.Not(check.Matches), `(?ms).*UnknownKey.*`)
+}

commit 90a750e42e27ee5cfdffa65ba675b19005cbb345
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Apr 16 14:05:53 2019 -0400

    15003: Load config over defaults.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

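Load reads the input once just to find the cluster IDs, unmarshals the
generated defaults for each ID, then unmarshals the user-supplied YAML on top
(a later commit, above, switches this overlay to a map-level merge via mergo).
A sketch mirroring the TestNoConfigs case below:

    package main

    import (
        "bytes"
        "fmt"
        "log"
        "os"

        "git.curoverse.com/arvados.git/lib/config"
        "git.curoverse.com/arvados.git/sdk/go/ctxlog"
    )

    func main() {
        logger := ctxlog.New(os.Stderr, "text", "info")
        cfg, err := config.Load(bytes.NewBufferString(`Clusters: {"z1111": {}}`), logger)
        if err != nil {
            log.Fatal(err)
        }
        cc, err := cfg.GetCluster("z1111")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(cc.ClusterID) // "z1111"
    }
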
diff --git a/lib/config/load.go b/lib/config/load.go
new file mode 100644
index 000000000..e16a6e4b7
--- /dev/null
+++ b/lib/config/load.go
@@ -0,0 +1,113 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"git.curoverse.com/arvados.git/sdk/go/arvados"
+	"github.com/ghodss/yaml"
+)
+
+type logger interface {
+	Warnf(string, ...interface{})
+}
+
+type deprecatedConfig struct {
+	Clusters map[string]struct {
+		NodeProfiles map[string]arvados.NodeProfile
+	}
+}
+
+func Load(rdr io.Reader, log logger) (*arvados.Config, error) {
+	var cfg arvados.Config
+	buf, err := ioutil.ReadAll(rdr)
+	if err != nil {
+		return nil, err
+	}
+
+	// Load the config into a dummy map to get the cluster ID
+	// keys, discarding the values; then set up defaults for each
+	// cluster ID; then load the real config on top of the
+	// defaults.
+	var dummy struct {
+		Clusters map[string]struct{}
+	}
+	err = yaml.Unmarshal(buf, &dummy)
+	if err != nil {
+		return nil, err
+	}
+	if len(dummy.Clusters) == 0 {
+		return nil, errors.New("config does not define any clusters")
+	}
+	for id := range dummy.Clusters {
+		err = yaml.Unmarshal(bytes.Replace(DefaultYAML, []byte("xxxxx"), []byte(id), -1), &cfg)
+		if err != nil {
+			return nil, fmt.Errorf("loading defaults for %s: %s", id, err)
+		}
+	}
+	err = yaml.Unmarshal(buf, &cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check for deprecated config values, and apply them to cfg.
+	var dc deprecatedConfig
+	err = yaml.Unmarshal(buf, &dc)
+	if err != nil {
+		return nil, err
+	}
+	err = applyDeprecatedConfig(&cfg, &dc, log)
+	if err != nil {
+		return nil, err
+	}
+	return &cfg, nil
+}
+
+func applyDeprecatedConfig(cfg *arvados.Config, dc *deprecatedConfig, log logger) error {
+	hostname, err := os.Hostname()
+	if err != nil {
+		return err
+	}
+	for id, dcluster := range dc.Clusters {
+		cluster, ok := cfg.Clusters[id]
+		if !ok {
+			return fmt.Errorf("can't load legacy config %q that is not present in current config", id)
+		}
+		for name, np := range dcluster.NodeProfiles {
+			if name == "*" || name == os.Getenv("ARVADOS_NODE_PROFILE") || name == hostname {
+				applyDeprecatedNodeProfile(hostname, np.RailsAPI, &cluster.Services.RailsAPI)
+				applyDeprecatedNodeProfile(hostname, np.Controller, &cluster.Services.Controller)
+				applyDeprecatedNodeProfile(hostname, np.DispatchCloud, &cluster.Services.DispatchCloud)
+			}
+		}
+		cfg.Clusters[id] = cluster
+	}
+	return nil
+}
+
+func applyDeprecatedNodeProfile(hostname string, ssi arvados.SystemServiceInstance, svc *arvados.Service) {
+	scheme := "https"
+	if !ssi.TLS {
+		scheme = "http"
+	}
+	if svc.InternalURLs == nil {
+		svc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}
+	}
+	host := ssi.Listen
+	if host == "" {
+		return
+	}
+	if strings.HasPrefix(host, ":") {
+		host = hostname + host
+	}
+	svc.InternalURLs[arvados.URL{Scheme: scheme, Host: host}] = arvados.ServiceInstance{}
+}
diff --git a/lib/config/load_test.go b/lib/config/load_test.go
new file mode 100644
index 000000000..f00ce33fd
--- /dev/null
+++ b/lib/config/load_test.go
@@ -0,0 +1,116 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"os/exec"
+	"testing"
+
+	"git.curoverse.com/arvados.git/sdk/go/ctxlog"
+	"github.com/ghodss/yaml"
+	check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+	check.TestingT(t)
+}
+
+var _ = check.Suite(&LoadSuite{})
+
+type LoadSuite struct{}
+
+func (s *LoadSuite) TestEmpty(c *check.C) {
+	cfg, err := Load(&bytes.Buffer{}, ctxlog.TestLogger(c))
+	c.Check(cfg, check.IsNil)
+	c.Assert(err, check.ErrorMatches, `config does not define any clusters`)
+}
+
+func (s *LoadSuite) TestNoConfigs(c *check.C) {
+	cfg, err := Load(bytes.NewBufferString(`Clusters: {"z1111": {}}`), ctxlog.TestLogger(c))
+	c.Assert(err, check.IsNil)
+	c.Assert(cfg.Clusters, check.HasLen, 1)
+	cc, err := cfg.GetCluster("z1111")
+	c.Assert(err, check.IsNil)
+	c.Check(cc.ClusterID, check.Equals, "z1111")
+}
+
+func (s *LoadSuite) TestMultipleClusters(c *check.C) {
+	cfg, err := Load(bytes.NewBufferString(`{"Clusters":{"z1111":{},"z2222":{}}}`), ctxlog.TestLogger(c))
+	c.Assert(err, check.IsNil)
+	c1, err := cfg.GetCluster("z1111")
+	c.Assert(err, check.IsNil)
+	c.Check(c1.ClusterID, check.Equals, "z1111")
+	c2, err := cfg.GetCluster("z2222")
+	c.Assert(err, check.IsNil)
+	c.Check(c2.ClusterID, check.Equals, "z2222")
+}
+
+func (s *LoadSuite) TestNodeProfilesToServices(c *check.C) {
+	hostname, err := os.Hostname()
+	c.Assert(err, check.IsNil)
+	s.checkEquivalent(c, `
+Clusters:
+ z1111:
+  NodeProfiles:
+   "*":
+    arvados-dispatch-cloud:
+     listen: ":9006"
+    arvados-controller:
+     listen: ":9004"
+   `+hostname+`:
+    arvados-api-server:
+     listen: ":8000"
+`, `
+Clusters:
+ z1111:
+  Services:
+   RailsAPI:
+    InternalURLs:
+     "http://`+hostname+`:8000": {}
+   Controller:
+    InternalURLs:
+     "http://`+hostname+`:9004": {}
+   DispatchCloud:
+    InternalURLs:
+     "http://`+hostname+`:9006": {}
+  NodeProfiles:
+   "*":
+    arvados-dispatch-cloud:
+     listen: ":9006"
+    arvados-controller:
+     listen: ":9004"
+   `+hostname+`:
+    arvados-api-server:
+     listen: ":8000"
+`)
+}
+
+func (s *LoadSuite) checkEquivalent(c *check.C, goty, expectedy string) {
+	got, err := Load(bytes.NewBufferString(goty), ctxlog.TestLogger(c))
+	c.Assert(err, check.IsNil)
+	expected, err := Load(bytes.NewBufferString(expectedy), ctxlog.TestLogger(c))
+	c.Assert(err, check.IsNil)
+	if !c.Check(got, check.DeepEquals, expected) {
+		cmd := exec.Command("diff", "-u", "--label", "got", "--label", "expected", "/dev/fd/3", "/dev/fd/4")
+		for _, obj := range []interface{}{got, expected} {
+			y, _ := yaml.Marshal(obj)
+			pr, pw, err := os.Pipe()
+			c.Assert(err, check.IsNil)
+			defer pr.Close()
+			go func() {
+				io.Copy(pw, bytes.NewBuffer(y))
+				pw.Close()
+			}()
+			cmd.ExtraFiles = append(cmd.ExtraFiles, pr)
+		}
+		diff, err := cmd.CombinedOutput()
+		c.Log(string(diff))
+		c.Check(err, check.IsNil)
+	}
+}
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 2965d5ecb..32763acd1 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -105,6 +105,10 @@ func (su *URL) UnmarshalText(text []byte) error {
 	return err
 }
 
+func (su URL) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("%s", (*url.URL)(&su).String())), nil
+}
+
 type ServiceInstance struct{}
 
 type Logging struct {

commit a5cce35e9cb84ef0487e13058702fc7368c8d546
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Fri Apr 12 16:04:37 2019 -0400

    15003: Generate config.DefaultYAML from source file.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/build/run-tests.sh b/build/run-tests.sh
index 714143b13..9690cbbb9 100755
--- a/build/run-tests.sh
+++ b/build/run-tests.sh
@@ -987,6 +987,7 @@ gostuff=(
     lib/cloud
     lib/cloud/azure
     lib/cloud/ec2
+    lib/config
     lib/dispatchcloud
     lib/dispatchcloud/container
     lib/dispatchcloud/scheduler
diff --git a/lib/config/generate.go b/lib/config/generate.go
new file mode 100644
index 000000000..c192d7bb2
--- /dev/null
+++ b/lib/config/generate.go
@@ -0,0 +1,72 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+// +build ignore
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+)
+
+func main() {
+	err := generate()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func generate() error {
+	outfn := "generated_config.go"
+	tmpfile, err := ioutil.TempFile(".", "."+outfn+".")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(tmpfile.Name())
+
+	gofmt := exec.Command("gofmt", "-s")
+	gofmt.Stdout = tmpfile
+	gofmt.Stderr = os.Stderr
+	w, err := gofmt.StdinPipe()
+	if err != nil {
+		return err
+	}
+	gofmt.Start()
+
+	// copyright header: same as this file
+	cmd := exec.Command("head", "-n", "4", "generate.go")
+	cmd.Stdout = w
+	cmd.Stderr = os.Stderr
+	err = cmd.Run()
+	if err != nil {
+		return err
+	}
+
+	data, err := ioutil.ReadFile("config.default.yml")
+	if err != nil {
+		return err
+	}
+	_, err = fmt.Fprintf(w, "package config\nvar DefaultYAML = []byte(`%s`)", bytes.Replace(data, []byte{'`'}, []byte("`+\"`\"+`"), -1))
+	if err != nil {
+		return err
+	}
+	err = w.Close()
+	if err != nil {
+		return err
+	}
+	err = gofmt.Wait()
+	if err != nil {
+		return err
+	}
+	err = tmpfile.Close()
+	if err != nil {
+		return err
+	}
+	return os.Rename(tmpfile.Name(), outfn)
+}
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
new file mode 100644
index 000000000..a24a9055f
--- /dev/null
+++ b/lib/config/generated_config.go
@@ -0,0 +1,457 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+var DefaultYAML = []byte(`# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
+#
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.default.yml
+
+Clusters:
+  xxxxx:
+    SystemRootToken: ""
+
+    # Token to be included in all healthcheck requests. Disabled by default.
+    # Server expects request header of the format "Authorization: Bearer xxx"
+    ManagementToken: ""
+
+    Services:
+      RailsAPI:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Controller:
+        InternalURLs: {}
+        ExternalURL: ""
+      Websocket:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepbalance:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      GitSSH:
+        ExternalURL: ""
+      DispatchCloud:
+        InternalURLs: {}
+      SSO:
+        ExternalURL: ""
+      Keepproxy:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAV:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAVDownload:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Composer:
+        ExternalURL: ""
+      WebShell:
+        ExternalURL: ""
+      Workbench1:
+        InternalURLs: {}
+        ExternalURL: ""
+      Workbench2:
+        ExternalURL: ""
+    PostgreSQL:
+      # max concurrent connections per arvados server daemon
+      ConnectionPool: 32
+      Connection:
+        # All parameters here are passed to the PG client library in a connection string;
+        # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+        Host: ""
+        Port: 0
+        User: ""
+        Password: ""
+        DBName: ""
+    API:
+      # Maximum size (in bytes) allowed for a single API request.  This
+      # limit is published in the discovery document for use by clients.
+      # Note: You must separately configure the upstream web server or
+      # proxy to actually enforce the desired maximum request size on the
+      # server side.
+      MaxRequestSize: 134217728
+
+      # Limit the number of bytes read from the database during an index
+      # request (by retrieving and returning fewer rows than would
+      # normally be returned in a single response).
+      # Note 1: This setting never reduces the number of returned rows to
+      # zero, no matter how big the first data row is.
+      # Note 2: Currently, this is only checked against a specific set of
+      # columns that tend to get large (collections.manifest_text,
+      # containers.mounts, workflows.definition). Other fields (e.g.,
+      # "properties" hashes) are not counted against this limit.
+      MaxIndexDatabaseRead: 134217728
+
+      # Maximum number of items to return when responding to APIs that
+      # can return partial result sets using limit and offset parameters
+      # (e.g., *.index, groups.contents). If a request specifies a "limit"
+      # parameter higher than this value, this value is used instead.
+      MaxItemsPerResponse: 1000
+
+      # API methods to disable. Disabled methods are not listed in the
+      # discovery document, and respond 404 to all requests.
+      # Example: ["jobs.create", "pipeline_instances.create"]
+      DisabledAPIs: []
+
+      # Interval (seconds) between asynchronous permission view updates. Any
+      # permission-updating API called with the 'async' parameter schedules an
+      # update on the permission view in the future, if not already scheduled.
+      AsyncPermissionsUpdateInterval: 20
+
+    Users:
+      # Config parameters to automatically setup new users.  If enabled,
+      # these users will be able to self-activate.  Enable this if you want
+      # to run an open instance where anyone can create an account and use
+      # the system without requiring manual approval.
+      #
+      # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
+      # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
+      AutoSetupNewUsers: false
+      AutoSetupNewUsersWithVmUUID: ""
+      AutoSetupNewUsersWithRepository: false
+      AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+      # When new_users_are_active is set to true, new users will be active
+      # immediately.  This skips the "self-activate" step which enforces
+      # user agreements.  Should only be enabled for development.
+      NewUsersAreActive: false
+
+      # The e-mail address of the user you would like to become marked as an admin
+      # user on their first login.
+      # In the default configuration, authentication happens through the Arvados SSO
+      # server, which uses OAuth2 against Google's servers, so in that case this
+      # should be an address associated with a Google account.
+      AutoAdminUserWithEmail: ""
+
+      # If auto_admin_first_user is set to true, the first user to log in when no
+      # other admin users exist will automatically become an admin user.
+      AutoAdminFirstUser: false
+
+      # Email address to notify whenever a user creates a profile for the
+      # first time
+      UserProfileNotificationAddress: ""
+      AdminNotifierEmailFrom: arvados at example.com
+      EmailSubjectPrefix: "[ARVADOS] "
+      UserNotifierEmailFrom: arvados at example.com
+      NewUserNotificationRecipients: []
+      NewInactiveUserNotificationRecipients: []
+
+    AuditLogs:
+      # Time to keep audit logs, in seconds. (An audit log is a row added
+      # to the "logs" table in the PostgreSQL database each time an
+      # Arvados object is created, modified, or deleted.)
+      #
+      # Currently, websocket event notifications rely on audit logs, so
+      # this should not be set lower than 600 (5 minutes).
+      MaxAge: 1209600
+
+      # Maximum number of log rows to delete in a single SQL transaction.
+      #
+      # If max_audit_log_delete_batch is 0, log entries will never be
+      # deleted by Arvados. Cleanup can be done by an external process
+      # without affecting any Arvados system processes, as long as very
+      # recent (<5 minutes old) logs are not deleted.
+      #
+      # 100000 is a reasonable batch size for most sites.
+      MaxDeleteBatch: 0
+
+      # Attributes to suppress in events and audit logs.  Notably,
+      # specifying ["manifest_text"] here typically makes the database
+      # smaller and faster.
+      #
+      # Warning: Using any non-empty value here can have undesirable side
+      # effects for any client or component that relies on event logs.
+      # Use at your own risk.
+      UnloggedAttributes: []
+
+    SystemLogs:
+      # Maximum characters of (JSON-encoded) query parameters to include
+      # in each request log entry. When params exceed this size, they will
+      # be JSON-encoded, truncated to this size, and logged as
+      # params_truncated.
+      MaxRequestLogParamsSize: 2000
+
+    Collections:
+      # Allow clients to create collections by providing a manifest with
+      # unsigned data blob locators. IMPORTANT: This effectively disables
+      # access controls for data stored in Keep: a client who knows a hash
+      # can write a manifest that references the hash, pass it to
+      # collections.create (which will create a permission link), use
+      # collections.get to obtain a signature for that data locator, and
+      # use that signed locator to retrieve the data from Keep. Therefore,
+      # do not turn this on if your users expect to keep data private from
+      # one another!
+      BlobSigning: true
+
+      # blob_signing_key is a string of alphanumeric characters used to
+      # generate permission signatures for Keep locators. It must be
+      # identical to the permission key given to Keep. IMPORTANT: This is
+      # a site secret. It should be at least 50 characters.
+      #
+      # Modifying blob_signing_key will invalidate all existing
+      # signatures, which can cause programs to fail (e.g., arv-put,
+      # arv-get, and Crunch jobs).  To avoid errors, rotate keys only when
+      # no such processes are running.
+      BlobSigningKey: ""
+
+      # Default replication level for collections. This is used when a
+      # collection's replication_desired attribute is nil.
+      DefaultReplication: 2
+
+      # Lifetime (in seconds) of blob permission signatures generated by
+      # the API server. This determines how long a client can take (after
+      # retrieving a collection record) to retrieve the collection data
+      # from Keep. If the client needs more time than that (assuming the
+      # collection still has the same content and the relevant user/token
+      # still has permission) the client can retrieve the collection again
+      # to get fresh signatures.
+      #
+      # This must be exactly equal to the -blob-signature-ttl flag used by
+      # keepstore servers.  Otherwise, reading data blocks and saving
+      # collections will fail with HTTP 403 permission errors.
+      #
+      # Modifying blob_signature_ttl invalidates existing signatures; see
+      # blob_signing_key note above.
+      #
+      # The default is 2 weeks.
+      BlobSigningTTL: 1209600
+
+      # Default lifetime for ephemeral collections: 2 weeks. This must not
+      # be less than blob_signature_ttl.
+      DefaultTrashLifetime: 1209600
+
+      # Interval (seconds) between trash sweeps. During a trash sweep,
+      # collections are marked as trash if their trash_at time has
+      # arrived, and deleted if their delete_at time has arrived.
+      TrashSweepInterval: 60
+
+      # If true, enable collection versioning.
+      # When a collection's preserve_version field is true or the current version
+      # is older than the amount of seconds defined on preserve_version_if_idle,
+      # a snapshot of the collection's previous state is created and linked to
+      # the current collection.
+      CollectionVersioning: false
+
+      #   0 = auto-create a new version on every update.
+      #  -1 = never auto-create new versions.
+      # > 0 = auto-create a new version when older than the specified number of seconds.
+      PreserveVersionIfIdle: -1
+
+    Login:
+      # These settings are provided by your OAuth2 provider (e.g.,
+      # sso-provider).
+      ProviderAppSecret: ""
+      ProviderAppID: ""
+
+    Git:
+      # Git repositories must be readable by api server, or you won't be
+      # able to submit crunch jobs. To pass the test suites, put a clone
+      # of the arvados tree in {git_repositories_dir}/arvados.git or
+      # {git_repositories_dir}/arvados/.git
+      Repositories: /var/lib/arvados/git/repositories
+
+    TLS:
+      Insecure: false
+
+    Containers:
+      # List of supported Docker Registry image formats that compute nodes
+      # are able to use. ` + "`" + `arv keep docker` + "`" + ` will error out if a user tries
+      # to store an image with an unsupported format. Use an empty array
+      # to skip the compatibility check (and display a warning message to
+      # that effect).
+      #
+      # Example for sites running docker < 1.10: ["v1"]
+      # Example for sites running docker >= 1.10: ["v2"]
+      # Example for disabling check: []
+      SupportedDockerImageFormats: ["v2"]
+
+      # Include details about job reuse decisions in the server log. This
+      # causes additional database queries to run, so it should not be
+      # enabled unless you expect to examine the resulting logs for
+      # troubleshooting purposes.
+      LogReuseDecisions: false
+
+      # Default value for keep_cache_ram of a container's runtime_constraints.
+      DefaultKeepCacheRAM: 268435456
+
+      # Number of times a container can be unlocked before being
+      # automatically cancelled.
+      MaxDispatchAttempts: 5
+
+      # Default value for container_count_max for container requests.  This is the
+      # number of times Arvados will create a new container to satisfy a container
+      # request.  If a container is cancelled, Arvados will retry with a new container
+      # as long as container_count < container_count_max on any container requests
+      # associated with the cancelled container.
+      MaxRetryAttempts: 3
+
+      # The maximum number of compute nodes that can be in use simultaneously.
+      # If this limit is reduced, any existing nodes with slot number >= new limit
+      # will not be counted against the new limit. In other words, the new limit
+      # won't be strictly enforced until those nodes with higher slot numbers
+      # go down.
+      MaxComputeVMs: 64
+
+      # Preemptible instance support (e.g. AWS Spot Instances).
+      # When true, child containers will get created with the preemptible
+      # scheduling parameter set.
+      UsePreemptibleInstances: false
+
+      Logging:
+        # When you run the db:delete_old_container_logs task, it will find
+        # containers that have been finished for at least this long,
+        # and delete their stdout, stderr, arv-mount, crunch-run, and
+        # crunchstat logs from the logs table.
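+        # (720h = 720 hours = 30 days.)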
+        MaxAge: 720h
+
+        # These two settings control how frequently log events are flushed to the
+        # database.  Log lines are buffered until either crunch_log_bytes_per_event
+        # has been reached or crunch_log_seconds_between_events has elapsed since
+        # the last flush.
+        LogBytesPerEvent: 4096
+        LogSecondsBetweenEvents: 1
+
+        # The sample period for throttling logs, in seconds.
+        LogThrottlePeriod: 60
+
+        # Maximum number of bytes that a job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleBytes: 65536
+
+        # Maximum number of lines that a job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleLines: 1024
+
+        # Maximum bytes that may be logged by a single job.  Log bytes that are
+        # silenced by throttling are not counted against this total.
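+        # (67108864 bytes = 64 MiB.)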
+        LimitLogBytesPerJob: 67108864
+
+        LogPartialLineThrottlePeriod: 5
+
+        # Container logs are written to Keep and saved in a collection,
+        # which is updated periodically while the container runs.  This
+        # value sets the interval (given in seconds) between collection
+        # updates.
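+        # (1800 seconds = 30 minutes.)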
+        LogUpdatePeriod: 1800
+
+        # The log collection is also updated when the specified amount of
+        # log data (given in bytes) is produced in less than one update
+        # period.
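+        # (33554432 bytes = 32 MiB.)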
+        LogUpdateSize: 33554432
+
+      SLURM:
+        Managed:
+          # Path to dns server configuration directory
+          # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+          # files or touch restart.txt (see below).
+          DNSServerConfDir: ""
+
+          # Template file for the dns server host snippets. See
+          # unbound.template in this directory for an example. If false, do
+          # not write any config files.
+          DNSServerConfTemplate: ""
+
+          # String to write to {dns_server_conf_dir}/restart.txt (with a
+          # trailing newline) after updating local data. If false, do not
+          # open or write the restart.txt file.
+          DNSServerReloadCommand: ""
+
+          # Command to run after each DNS update. Template variables will be
+          # substituted; see the "unbound" example below. If false, do not run
+          # a command.
+          DNSServerUpdateCommand: ""
+
+          ComputeNodeDomain: ""
+          ComputeNodeNameservers:
+            - 192.168.1.1
+
+          # Hostname to assign to a compute node when it sends a "ping" and the
+          # hostname in its Node record is nil.
+          # During bootstrapping, the "ping" script is expected to notice the
+          # hostname given in the ping response, and update its unix hostname
+          # accordingly.
+          # If false, leave the hostname alone (this is appropriate if your compute
+          # nodes' hostnames are already assigned by some other mechanism).
+          #
+          # One way or another, the hostnames of your node records should agree
+          # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+          #
+          # Example for compute0000, compute0001, ....:
+          # AssignNodeHostname: compute%<slot_number>04d
+          # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+          AssignNodeHostname: "compute%<slot_number>d"
+
+      JobsAPI:
+        # Enable the legacy Jobs API.  This value must be a string.
+        # 'auto' -- (default) enable the Jobs API only if it has been used before
+        #         (i.e., there are job records in the database)
+        # 'true' -- enable the Jobs API despite lack of existing records.
+        # 'false' -- disable the Jobs API despite presence of existing records.
+        Enable: 'auto'
+
+        # Git repositories must be readable by the API server, or you won't be
+        # able to submit crunch jobs. To pass the test suites, put a clone
+        # of the arvados tree in {git_repositories_dir}/arvados.git or
+        # {git_repositories_dir}/arvados/.git
+        GitInternalDir: /var/lib/arvados/internal.git
+
+        # Docker image to use when a job's runtime_constraints do not specify one.
+        DefaultDockerImage: ""
+
+        # none or slurm_immediate
+        CrunchJobWrapper: none
+
+        # Username to run jobs as, or false = do not set uid when running jobs.
+        CrunchJobUser: crunch
+
+        # The web service must be able to create/write this file, and
+        # crunch-job must be able to stat() it.
+        CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+        # Control job reuse behavior when two completed jobs match the
+        # search criteria and have different outputs.
+        #
+        # If true, in case of a conflict, reuse the earliest job (this is
+        # similar to container reuse behavior).
+        #
+        # If false, in case of a conflict, do not reuse any completed job,
+        # but do reuse an already-running job if available (this is the
+        # original job reuse behavior, and is still the default).
+        ReuseJobIfOutputsDiffer: false
+
+    Mail:
+      MailchimpAPIKey: ""
+      MailchimpListID: ""
+      SendUserSetupNotificationEmail: ""
+      IssueReporterEmailFrom: ""
+      IssueReporterEmailTo: ""
+      SupportEmailAddress: ""
+      EmailFrom: ""
+    RemoteClusters:
+      "*":
+        Proxy: false
+        ActivateUsers: false
+`)
diff --git a/lib/config/uptodate.go b/lib/config/uptodate.go
new file mode 100644
index 000000000..71bdba710
--- /dev/null
+++ b/lib/config/uptodate.go
@@ -0,0 +1,7 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+//go:generate go run generate.go
diff --git a/lib/config/uptodate_test.go b/lib/config/uptodate_test.go
new file mode 100644
index 000000000..10551f81e
--- /dev/null
+++ b/lib/config/uptodate_test.go
@@ -0,0 +1,22 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package config
+
+import (
+	"bytes"
+	"io/ioutil"
+	"testing"
+)
+
+func TestUpToDate(t *testing.T) {
+	src := "config.default.yml"
+	srcdata, err := ioutil.ReadFile(src)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(srcdata, DefaultYAML) {
+		t.Fatalf("content of %s differs from DefaultYAML -- you need to run 'go generate' and commit", src)
+	}
+}
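
The //go:generate directive in uptodate.go drives regeneration of the DefaultYAML value that TestUpToDate compares against config.default.yml. The generator itself isn't reproduced in this hunk; as a rough sketch only (the real generate.go may differ, and the output filename generated_config.go is an assumption), it could look like:

// Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0

// +build ignore

package main

import (
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Read the canonical default configuration file.
	src, err := ioutil.ReadFile("config.default.yml")
	if err != nil {
		log.Fatal(err)
	}
	// Write a Go source file that embeds the file content as DefaultYAML,
	// which is what TestUpToDate compares against config.default.yml.
	out := fmt.Sprintf("// Code generated by go generate; DO NOT EDIT.\n\npackage config\n\nvar DefaultYAML = []byte(%q)\n", src)
	if err := ioutil.WriteFile("generated_config.go", []byte(out), 0644); err != nil {
		log.Fatal(err)
	}
}

With a generator along these lines, the workflow the new test enforces is: edit config.default.yml, run 'go generate' in lib/config, and commit the regenerated file together with the edit.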

-----------------------------------------------------------------------


hooks/post-receive
-- 