[ARVADOS] updated: 1.3.0-1618-g9dcec2ce4

Git user git at public.curoverse.com
Wed Sep 18 20:31:19 UTC 2019


Summary of changes:
 doc/admin/upgrading.html.textile.liquid           |   8 +-
 doc/install/install-keepstore.html.textile.liquid |  15 ++-
 lib/config/deprecated_keepstore.go                | 130 ++++++++++++++++++++--
 lib/config/deprecated_keepstore_test.go           |  72 ++++++++++--
 lib/config/load.go                                |  12 +-
 sdk/go/arvados/keep_service.go                    |  15 ++-
 services/keepstore/azure_blob_volume.go           |   4 +-
 services/keepstore/command.go                     |   4 +-
 services/keepstore/handler_test.go                |  10 +-
 services/keepstore/s3_volume.go                   |   8 +-
 services/keepstore/s3_volume_test.go              |   4 +-
 services/keepstore/unix_volume.go                 |   4 +-
 services/keepstore/volume.go                      |  11 +-
 services/keepstore/volume_generic_test.go         |   9 +-
 14 files changed, 247 insertions(+), 59 deletions(-)

       via  9dcec2ce4a077f14204fdfd6c4b1ec208ea281ab (commit)
       via  32d0b751985b6e8adef29a71c3e4542e87f7c54f (commit)
       via  0e0f69c6164339f4d0babb1d9d5a68fae24c01d5 (commit)
       via  453fe7852de9b01a10ace4ded7b3b7825326e599 (commit)
       via  fc86370e31e1bb3a5d7b234419e29015c165bc67 (commit)
      from  baa2bf80cc078868191494ccb1631c426f4e0496 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.


commit 9dcec2ce4a077f14204fdfd6c4b1ec208ea281ab
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Wed Sep 18 16:28:46 2019 -0400

    13647: Advise deleting keep_services entries after migrating.
    
    Also warn about orphaned keepstore servers with no configured volumes,
    and volumes configured to be accessed by nonexistent keepstore
    servers.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>
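
As a sketch of the cleanup this commit recommends (the command is quoted from the new warning text added below; run it only after every keepstore and keepproxy host is present in the cluster config file):

    # Delete all manually added keep_services rows; once they are gone,
    # the services listed in the cluster configuration are used instead.
    arv --format=uuid keep_service list | xargs -n1 arv keep_service delete --uuid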

diff --git a/lib/config/deprecated_keepstore.go b/lib/config/deprecated_keepstore.go
index 1e44052a6..1969c4c3c 100644
--- a/lib/config/deprecated_keepstore.go
+++ b/lib/config/deprecated_keepstore.go
@@ -16,6 +16,7 @@ import (
 	"os"
 	"strconv"
 	"strings"
+	"time"
 
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
 	"github.com/sirupsen/logrus"
@@ -106,7 +107,7 @@ func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
 
 	var oc oldKeepstoreConfig
 	err = ldr.loadOldConfigHelper("keepstore", ldr.KeepstorePath, &oc)
-	if os.IsNotExist(err) && (ldr.KeepstorePath == defaultKeepstoreConfigPath) {
+	if os.IsNotExist(err) && ldr.KeepstorePath == defaultKeepstoreConfigPath {
 		return nil
 	} else if err != nil {
 		return err
@@ -403,7 +404,7 @@ func (ldr *Loader) alreadyMigrated(oldvol oldKeepstoreVolume, newvols map[string
 			var params arvados.DirectoryVolumeDriverParameters
 			if err := json.Unmarshal(newvol.DriverParameters, &params); err == nil &&
 				oldvol.Root == params.Root {
-				if _, ok := newvol.AccessViaHosts[myURL]; ok {
+				if _, ok := newvol.AccessViaHosts[myURL]; ok || len(newvol.AccessViaHosts) == 0 {
 					return uuid, true
 				}
 			}
@@ -519,14 +520,7 @@ func findKeepServicesItem(cluster *arvados.Cluster, listen string) (uuid string,
 		if ks.ServiceType == "proxy" {
 			continue
 		} else if keepServiceIsMe(ks, hostname, listen) {
-			url := arvados.URL{
-				Scheme: "http",
-				Host:   net.JoinHostPort(ks.ServiceHost, strconv.Itoa(ks.ServicePort)),
-			}
-			if ks.ServiceSSLFlag {
-				url.Scheme = "https"
-			}
-			return ks.UUID, url, nil
+			return ks.UUID, keepServiceURL(ks), nil
 		} else {
 			tried = append(tried, fmt.Sprintf("%s:%d", ks.ServiceHost, ks.ServicePort))
 		}
@@ -535,6 +529,17 @@ func findKeepServicesItem(cluster *arvados.Cluster, listen string) (uuid string,
 	return
 }
 
+func keepServiceURL(ks arvados.KeepService) arvados.URL {
+	url := arvados.URL{
+		Scheme: "http",
+		Host:   net.JoinHostPort(ks.ServiceHost, strconv.Itoa(ks.ServicePort)),
+	}
+	if ks.ServiceSSLFlag {
+		url.Scheme = "https"
+	}
+	return url
+}
+
 var localhostOrAllInterfaces = map[string]bool{
 	"localhost": true,
 	"127.0.0.1": true,
@@ -570,3 +575,106 @@ func keepServiceIsMe(ks arvados.KeepService, hostname string, listen string) boo
 	kshost := strings.ToLower(ks.ServiceHost)
 	return localhostOrAllInterfaces[kshost] || strings.HasPrefix(kshost+".", strings.ToLower(hostname)+".")
 }
+
+// Warn about pending keepstore migration tasks that haven't already
+// been warned about in loadOldKeepstoreConfig() -- i.e., unmigrated
+// keepstore hosts other than the present host, and obsolete content
+// in the keep_services table.
+func (ldr *Loader) checkPendingKeepstoreMigrations(cluster arvados.Cluster) error {
+	if cluster.Services.Controller.ExternalURL.String() == "" {
+		ldr.Logger.Debug("Services.Controller.ExternalURL not configured -- skipping check for pending keepstore config migrations")
+		return nil
+	}
+	client, err := arvados.NewClientFromConfig(&cluster)
+	if err != nil {
+		return err
+	}
+	client.AuthToken = cluster.SystemRootToken
+	var svcList arvados.KeepServiceList
+	err = client.RequestAndDecode(&svcList, "GET", "arvados/v1/keep_services", nil, nil)
+	if err != nil {
+		ldr.Logger.WithError(err).Warn("error retrieving keep_services list -- skipping check for pending keepstore config migrations")
+		return nil
+	}
+	hostname, err := os.Hostname()
+	if err != nil {
+		return fmt.Errorf("error getting hostname: %s", err)
+	}
+	sawTimes := map[time.Time]bool{}
+	for _, ks := range svcList.Items {
+		sawTimes[ks.CreatedAt] = true
+		sawTimes[ks.ModifiedAt] = true
+	}
+	if len(sawTimes) <= 1 {
+		// If all timestamps in the arvados/v1/keep_services
+		// response are identical, it's a clear sign the
+		// response was generated on the fly from the cluster
+		// config, rather than real database records. In that
+		// case (as well as the case where none are listed at
+		// all) it's pointless to look for entries that
+		// haven't yet been migrated to the config file.
+		return nil
+	}
+	needDBRows := false
+	for _, ks := range svcList.Items {
+		if ks.ServiceType == "proxy" {
+			if len(cluster.Services.Keepproxy.InternalURLs) == 0 {
+				needDBRows = true
+				ldr.Logger.Warn("you should migrate your keepproxy configuration to the cluster configuration file")
+			}
+			continue
+		}
+		kshost := strings.ToLower(ks.ServiceHost)
+		if localhostOrAllInterfaces[kshost] || strings.HasPrefix(kshost+".", strings.ToLower(hostname)+".") {
+			// it would be confusing to recommend
+			// migrating *this* host's legacy keepstore
+			// config immediately after explaining that
+			// very migration process in more detail.
+			continue
+		}
+		ksurl := keepServiceURL(ks)
+		if _, ok := cluster.Services.Keepstore.InternalURLs[ksurl]; ok {
+			// already added to InternalURLs
+			continue
+		}
+		ldr.Logger.Warnf("you should migrate the legacy keepstore configuration file on host %s", ks.ServiceHost)
+	}
+	if !needDBRows {
+		ldr.Logger.Warn("you should delete all of your manually added keep_services listings using `arv --format=uuid keep_service list | xargs -n1 arv keep_service delete --uuid` -- when those are deleted, the services listed in your cluster configuration will be used instead")
+	}
+	return nil
+}
+
+// Warn about keepstore servers that have no volumes.
+func (ldr *Loader) checkEmptyKeepstores(cluster arvados.Cluster) error {
+servers:
+	for url := range cluster.Services.Keepstore.InternalURLs {
+		for _, vol := range cluster.Volumes {
+			if len(vol.AccessViaHosts) == 0 {
+				// accessible by all servers
+				return nil
+			}
+			if _, ok := vol.AccessViaHosts[url]; ok {
+				continue servers
+			}
+		}
+		ldr.Logger.Warnf("keepstore configured at %s does not have access to any volumes", url)
+	}
+	return nil
+}
+
+// Warn about AccessViaHosts entries that don't correspond to any of
+// the listed keepstore services.
+func (ldr *Loader) checkUnlistedKeepstores(cluster arvados.Cluster) error {
+	for uuid, vol := range cluster.Volumes {
+		if uuid == "SAMPLE" {
+			continue
+		}
+		for url := range vol.AccessViaHosts {
+			if _, ok := cluster.Services.Keepstore.InternalURLs[url]; !ok {
+				ldr.Logger.Warnf("Volumes.%s.AccessViaHosts refers to nonexistent keepstore server %s", uuid, url)
+			}
+		}
+	}
+	return nil
+}
diff --git a/lib/config/deprecated_keepstore_test.go b/lib/config/deprecated_keepstore_test.go
index 0948e2e2f..d1028e8bd 100644
--- a/lib/config/deprecated_keepstore_test.go
+++ b/lib/config/deprecated_keepstore_test.go
@@ -572,6 +572,35 @@ Volumes:
 	c.Check(ok, check.Equals, true)
 }
 
+// Ensure logs mention unmigrated servers.
+func (s *KeepstoreMigrationSuite) TestPendingKeepstoreMigrations(c *check.C) {
+	client := arvados.NewClientFromEnv()
+	for _, host := range []string{"keep0", "keep1"} {
+		err := client.RequestAndDecode(new(struct{}), "POST", "arvados/v1/keep_services", nil, map[string]interface{}{
+			"keep_service": map[string]interface{}{
+				"service_type": "disk",
+				"service_host": host + ".zzzzz.example.com",
+				"service_port": 25107,
+			},
+		})
+		c.Assert(err, check.IsNil)
+	}
+
+	port, _ := s.getTestKeepstorePortAndMatchingVolumeUUID(c)
+	logs := s.logsWithKeepstoreConfig(c, `
+Listen: :`+strconv.Itoa(port)+`
+Volumes:
+- Type: S3
+  Endpoint: https://storage.googleapis.com
+  Bucket: foo
+`)
+	c.Check(logs, check.Matches, `(?ms).*you should remove the legacy keepstore config file.*`)
+	c.Check(logs, check.Matches, `(?ms).*you should migrate the legacy keepstore configuration file on host keep1.zzzzz.example.com.*`)
+	c.Check(logs, check.Not(check.Matches), `(?ms).*should migrate.*keep0.zzzzz.example.com.*`)
+	c.Check(logs, check.Matches, `(?ms).*keepstore configured at http://keep2.zzzzz.example.com:25107 does not have access to any volumes.*`)
+	c.Check(logs, check.Matches, `(?ms).*Volumes.zzzzz-nyw5e-possconfigerror.AccessViaHosts refers to nonexistent keepstore server http://keep00.zzzzz.example.com:25107.*`)
+}
+
 const clusterConfigForKeepstoreMigrationTest = `
 Clusters:
   zzzzz:
@@ -580,6 +609,8 @@ Clusters:
       Keepstore:
         InternalURLs:
           "http://{{.hostname}}:12345": {}
+          "http://keep0.zzzzz.example.com:25107": {}
+          "http://keep2.zzzzz.example.com:25107": {}
       Controller:
         ExternalURL: "https://{{.controller}}"
     TLS:
@@ -598,7 +629,7 @@ Clusters:
 
       zzzzz-nyw5e-readonlyonother:
         AccessViaHosts:
-          "http://other.host.example:12345": {ReadOnly: true}
+          "http://keep0.zzzzz.example.com:25107": {ReadOnly: true}
         Driver: S3
         DriverParameters:
           Endpoint: https://storage.googleapis.com
@@ -608,7 +639,7 @@ Clusters:
 
       zzzzz-nyw5e-writableonother:
         AccessViaHosts:
-          "http://other.host.example:12345": {}
+          "http://keep0.zzzzz.example.com:25107": {}
         Driver: S3
         DriverParameters:
           Endpoint: https://storage.googleapis.com
@@ -617,6 +648,8 @@ Clusters:
         Replication: 3
 
       zzzzz-nyw5e-localfilesystem:
+        AccessViaHosts:
+          "http://keep0.zzzzz.example.com:25107": {}
         Driver: Directory
         DriverParameters:
           Root: /data/sdd
@@ -629,16 +662,23 @@ Clusters:
         DriverParameters:
           Root: /data/sde
         Replication: 1
+
+      zzzzz-nyw5e-possconfigerror:
+        AccessViaHosts:
+          "http://keep00.zzzzz.example.com:25107": {}
+        Driver: Directory
+        DriverParameters:
+          Root: /data/sdf
+        Replication: 1
 `
 
 // Determine the effect of combining the given legacy keepstore config
 // YAML (just the "Volumes" entries of an old keepstore config file)
 // with the example clusterConfigForKeepstoreMigrationTest config.
 //
-// Return two Volumes configs -- one without loading
-// keepstoreconfigdata ("before") and one with ("after") -- for the
-// caller to compare.
-func (s *KeepstoreMigrationSuite) loadWithKeepstoreConfig(c *check.C, keepstoreVolumesYAML string) (before, after map[string]arvados.Volume) {
+// Return two Volumes configs -- one without loading keepstoreYAML
+// ("before") and one with ("after") -- for the caller to compare.
+func (s *KeepstoreMigrationSuite) loadWithKeepstoreConfig(c *check.C, keepstoreYAML string) (before, after map[string]arvados.Volume) {
 	ldr := testLoader(c, s.clusterConfigYAML(c), nil)
 	cBefore, err := ldr.Load()
 	c.Assert(err, check.IsNil)
@@ -646,7 +686,7 @@ func (s *KeepstoreMigrationSuite) loadWithKeepstoreConfig(c *check.C, keepstoreV
 	keepstoreconfig, err := ioutil.TempFile("", "")
 	c.Assert(err, check.IsNil)
 	defer os.Remove(keepstoreconfig.Name())
-	io.WriteString(keepstoreconfig, keepstoreVolumesYAML)
+	io.WriteString(keepstoreconfig, keepstoreYAML)
 
 	ldr = testLoader(c, s.clusterConfigYAML(c), nil)
 	ldr.KeepstorePath = keepstoreconfig.Name()
@@ -656,6 +696,24 @@ func (s *KeepstoreMigrationSuite) loadWithKeepstoreConfig(c *check.C, keepstoreV
 	return cBefore.Clusters["zzzzz"].Volumes, cAfter.Clusters["zzzzz"].Volumes
 }
 
+// Return the log messages emitted when loading keepstoreYAML along
+// with clusterConfigForKeepstoreMigrationTest.
+func (s *KeepstoreMigrationSuite) logsWithKeepstoreConfig(c *check.C, keepstoreYAML string) string {
+	var logs bytes.Buffer
+
+	keepstoreconfig, err := ioutil.TempFile("", "")
+	c.Assert(err, check.IsNil)
+	defer os.Remove(keepstoreconfig.Name())
+	io.WriteString(keepstoreconfig, keepstoreYAML)
+
+	ldr := testLoader(c, s.clusterConfigYAML(c), &logs)
+	ldr.KeepstorePath = keepstoreconfig.Name()
+	_, err = ldr.Load()
+	c.Assert(err, check.IsNil)
+
+	return logs.String()
+}
+
 func (s *KeepstoreMigrationSuite) clusterConfigYAML(c *check.C) string {
 	hostname, err := os.Hostname()
 	c.Assert(err, check.IsNil)
diff --git a/lib/config/load.go b/lib/config/load.go
index 7e4849393..db4bc9f59 100644
--- a/lib/config/load.go
+++ b/lib/config/load.go
@@ -259,9 +259,15 @@ func (ldr *Loader) Load() (*arvados.Config, error) {
 
 	// Check for known mistakes
 	for id, cc := range cfg.Clusters {
-		err = checkKeyConflict(fmt.Sprintf("Clusters.%s.PostgreSQL.Connection", id), cc.PostgreSQL.Connection)
-		if err != nil {
-			return nil, err
+		for _, err = range []error{
+			checkKeyConflict(fmt.Sprintf("Clusters.%s.PostgreSQL.Connection", id), cc.PostgreSQL.Connection),
+			ldr.checkPendingKeepstoreMigrations(cc),
+			ldr.checkEmptyKeepstores(cc),
+			ldr.checkUnlistedKeepstores(cc),
+		} {
+			if err != nil {
+				return nil, err
+			}
 		}
 	}
 	return &cfg, nil
diff --git a/sdk/go/arvados/keep_service.go b/sdk/go/arvados/keep_service.go
index e0ae1758d..97a62fa7b 100644
--- a/sdk/go/arvados/keep_service.go
+++ b/sdk/go/arvados/keep_service.go
@@ -10,16 +10,19 @@ import (
 	"net/http"
 	"strconv"
 	"strings"
+	"time"
 )
 
 // KeepService is an arvados#keepService record
 type KeepService struct {
-	UUID           string `json:"uuid"`
-	ServiceHost    string `json:"service_host"`
-	ServicePort    int    `json:"service_port"`
-	ServiceSSLFlag bool   `json:"service_ssl_flag"`
-	ServiceType    string `json:"service_type"`
-	ReadOnly       bool   `json:"read_only"`
+	UUID           string    `json:"uuid"`
+	ServiceHost    string    `json:"service_host"`
+	ServicePort    int       `json:"service_port"`
+	ServiceSSLFlag bool      `json:"service_ssl_flag"`
+	ServiceType    string    `json:"service_type"`
+	ReadOnly       bool      `json:"read_only"`
+	CreatedAt      time.Time `json:"created_at"`
+	ModifiedAt     time.Time `json:"modified_at"`
 }
 
 type KeepMount struct {
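
A hypothetical config fragment (host names and volume UUID are placeholders, modeled on the test fixtures above) that would trip both new checks -- a keepstore listed in InternalURLs that no volume grants access to, and an AccessViaHosts entry naming a server that is not listed:

    Clusters:
      zzzzz:
        Services:
          Keepstore:
            InternalURLs:
              "http://keep2.zzzzz.example.com:25107": {}    # no volume mentions this host
        Volumes:
          zzzzz-nyw5e-possconfigerror:
            AccessViaHosts:
              "http://keep00.zzzzz.example.com:25107": {}   # not in InternalURLs
            Driver: Directory
            DriverParameters:
              Root: /data/sdf
            Replication: 1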

commit 32d0b751985b6e8adef29a71c3e4542e87f7c54f
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Sep 17 13:28:48 2019 -0400

    13647: Add upgrade note about keepstore command line flags.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/doc/admin/upgrading.html.textile.liquid b/doc/admin/upgrading.html.textile.liquid
index e1e29ed12..b98a9e2dc 100644
--- a/doc/admin/upgrading.html.textile.liquid
+++ b/doc/admin/upgrading.html.textile.liquid
@@ -45,9 +45,13 @@ h4. Arv-git-httpd configuration migration
 
 (feature "#14712":https://dev.arvados.org/issues/14712 ) The arv-git-httpd package can now be configured using the centralized configuration file at @/etc/arvados/config.yml at . Configuration via individual command line arguments is no longer available. Please see "arv-git-httpd's config migration guide":{{site.baseurl}}/admin/config-migration.html#arv-git-httpd for more details.
 
-h4. Keep-web dropped support on command line flags configuration
+h4. Keepstore and keep-web no longer support configuration via command line flags
 
-As we're migrating to a central cluster configuration file, the already deprecated way of getting configurations via environment variables and command line flags isn't valid anymore. Current keep-web supports both the now legacy @keep-web.yml@ config format (used by Arvados 1.4) and the new cluster config file format. Please check "keep-web's install guide":{{site.baseurl}}/install/install-keep-web.html for more details.
+As we're migrating to a central cluster configuration file, the already deprecated way of getting configurations via environment variables and command line flags isn't valid anymore.
+
+Current keep-web supports both the now legacy @keep-web.yml@ config format (used by Arvados 1.4) and the new cluster config file format. Please check "keep-web's install guide":{{site.baseurl}}/install/install-keep-web.html for more details.
+
+Current keepstore supports both the now legacy @keepstore.yml@ config format (used by Arvados 1.4) and the new cluster config file format. Please check "keepstore's install guide":{{site.baseurl}}/install/install-keepstore.html for more details.
 
 h4. Jobs API is read-only
 

commit 0e0f69c6164339f4d0babb1d9d5a68fae24c01d5
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Sep 17 13:25:45 2019 -0400

    13647: Update config keywords in docs, comments, and error messages.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/doc/install/install-keepstore.html.textile.liquid b/doc/install/install-keepstore.html.textile.liquid
index 157ac589b..a16de5162 100644
--- a/doc/install/install-keepstore.html.textile.liquid
+++ b/doc/install/install-keepstore.html.textile.liquid
@@ -83,11 +83,11 @@ h3. Notes on storage management
 
 On its own, a keepstore server never deletes data.  The "keep-balance":install-keep-balance.html service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash.
 
-When a block is newly written, it is protected from deletion for the duration in @BlobSignatureTTL@.  During this time, it cannot be trashed.
+When a block is newly written, it is protected from deletion for the duration in @BlobSigningTTL@.  During this time, it cannot be trashed or deleted.
 
-If keep-balance instructs keepstore to trash a block which is older than @BlobSignatureTTL@, and @EnableDelete@ is true, the block will be moved to "trash".  A block which is in the trash is no longer accessible by read requests, but has not yet been permanently deleted.  Blocks which are in the trash may be recovered using the "untrash" API endpoint.  Blocks are permanently deleted after they have been in the trash for the duration in @TrashLifetime@.
+If keep-balance instructs keepstore to trash a block which is older than @BlobSigningTTL@, and @BlobTrashLifetime@ is non-zero, the block will be moved to "trash".  A block which is in the trash is no longer accessible by read requests, but has not yet been permanently deleted.  Blocks which are in the trash may be recovered using the "untrash" API endpoint.  Blocks are permanently deleted after they have been in the trash for the duration in @BlobTrashLifetime@.
 
-Keep-balance is also responsible for balancing the distribution of blocks across keepstore servers by asking servers to pull blocks from other servers (as determined by their "storage class":{{site.baseurl}}/admin/storage-classes.html and "rendezvous hashing order":{{site.baseurl}}/api/storage.html).  Pulling a block makes a copy.  If a block is overreplicated (i.e. there are excess copies) after pulling, it will be subsequently trashed on the original server.
+Keep-balance is also responsible for balancing the distribution of blocks across keepstore servers by asking servers to pull blocks from other servers (as determined by their "storage class":{{site.baseurl}}/admin/storage-classes.html and "rendezvous hashing order":{{site.baseurl}}/api/storage.html).  Pulling a block makes a copy.  If a block is overreplicated (i.e. there are excess copies) after pulling, it will be subsequently trashed and deleted on the original server, subject to @BlobTrash@ and @BlobTrashLifetime@ settings.
 
 h3. Configure storage volumes
 
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index b52b706b2..6806c8c6c 100644
--- a/services/keepstore/azure_blob_volume.go
+++ b/services/keepstore/azure_blob_volume.go
@@ -497,7 +497,7 @@ func (v *AzureBlobVolume) Trash(loc string) error {
 		return nil
 	}
 
-	// If TrashLifetime == 0, just delete it
+	// If BlobTrashLifetime == 0, just delete it
 	if v.cluster.Collections.BlobTrashLifetime == 0 {
 		return v.container.DeleteBlob(loc, &storage.DeleteBlobOptions{
 			IfMatch: props.Etag,
@@ -567,7 +567,7 @@ func (v *AzureBlobVolume) isKeepBlock(s string) bool {
 	return keepBlockRegexp.MatchString(s)
 }
 
-// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
+// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
 // and deletes them from the volume.
 func (v *AzureBlobVolume) EmptyTrash() {
 	if v.cluster.Collections.BlobDeleteConcurrency < 1 {
diff --git a/services/keepstore/command.go b/services/keepstore/command.go
index 51031d360..c589e639f 100644
--- a/services/keepstore/command.go
+++ b/services/keepstore/command.go
@@ -149,13 +149,13 @@ func (h *handler) setup(ctx context.Context, cluster *arvados.Cluster, token str
 	h.Cluster = cluster
 	h.Logger = ctxlog.FromContext(ctx)
 	if h.Cluster.API.MaxKeepBlobBuffers <= 0 {
-		return fmt.Errorf("MaxBuffers must be greater than zero")
+		return fmt.Errorf("API.MaxKeepBlobBuffers must be greater than zero")
 	}
 	bufs = newBufferPool(h.Logger, h.Cluster.API.MaxKeepBlobBuffers, BlockSize)
 
 	if h.Cluster.API.MaxConcurrentRequests < 1 {
 		h.Cluster.API.MaxConcurrentRequests = h.Cluster.API.MaxKeepBlobBuffers * 2
-		h.Logger.Warnf("MaxRequests <1 or not specified; defaulting to MaxKeepBlobBuffers * 2 == %d", h.Cluster.API.MaxConcurrentRequests)
+		h.Logger.Warnf("API.MaxConcurrentRequests <1 or not specified; defaulting to MaxKeepBlobBuffers * 2 == %d", h.Cluster.API.MaxConcurrentRequests)
 	}
 
 	if h.Cluster.Collections.BlobSigningKey != "" {
diff --git a/services/keepstore/handler_test.go b/services/keepstore/handler_test.go
index 299460297..9d69b9fa4 100644
--- a/services/keepstore/handler_test.go
+++ b/services/keepstore/handler_test.go
@@ -327,7 +327,7 @@ func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
 //   - authenticated   /index/prefix request | superuser
 //
 // The only /index requests that should succeed are those issued by the
-// superuser. They should pass regardless of the value of RequireSignatures.
+// superuser. They should pass regardless of the value of BlobSigning.
 //
 func (s *HandlerSuite) TestIndexHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
@@ -393,7 +393,7 @@ func (s *HandlerSuite) TestIndexHandler(c *check.C) {
 	// => UnauthorizedError
 	response := IssueRequest(s.handler, unauthenticatedReq)
 	ExpectStatusCode(c,
-		"RequireSignatures on, unauthenticated request",
+		"permissions on, unauthenticated request",
 		UnauthorizedError.HTTPCode,
 		response)
 
@@ -520,7 +520,7 @@ func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
 	vols := s.handler.volmgr.AllWritable()
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 
-	// Explicitly set the BlobSignatureTTL to 0 for these
+	// Explicitly set the BlobSigningTTL to 0 for these
 	// tests, to ensure the MockVolume deletes the blocks
 	// even though they have just been created.
 	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
@@ -611,7 +611,7 @@ func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
 		c.Error("superuserExistingBlockReq: block not deleted")
 	}
 
-	// A DELETE request on a block newer than BlobSignatureTTL
+	// A DELETE request on a block newer than BlobSigningTTL
 	// should return success but leave the block on the volume.
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
@@ -936,7 +936,7 @@ func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
 	select {
 	case <-ok:
 	case <-time.After(time.Second):
-		c.Fatal("PUT deadlocks with MaxBuffers==1")
+		c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
 	}
 }
 
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 22a38e208..9fc87045e 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -113,7 +113,7 @@ const (
 var (
 	// ErrS3TrashDisabled is returned by Trash if that operation
 	// is impossible with the current config.
-	ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because -trash-lifetime=0 and -s3-unsafe-delete=false")
+	ErrS3TrashDisabled = fmt.Errorf("trash function is disabled because Collections.BlobTrashLifetime=0 and DriverParameters.UnsafeDelete=false")
 
 	s3ACL = s3.Private
 
@@ -663,7 +663,7 @@ func (v *S3Volume) translateError(err error) error {
 	return err
 }
 
-// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
+// EmptyTrash looks for trashed blocks that exceeded BlobTrashLifetime
 // and deletes them from the volume.
 func (v *S3Volume) EmptyTrash() {
 	if v.cluster.Collections.BlobDeleteConcurrency < 1 {
@@ -712,8 +712,8 @@ func (v *S3Volume) EmptyTrash() {
 				// the raceWindow that starts if we
 				// delete trash/X now.
 				//
-				// Note this means (TrashCheckInterval
-				// < BlobSignatureTTL - raceWindow) is
+				// Note this means (TrashSweepInterval
+				// < BlobSigningTTL - raceWindow) is
 				// necessary to avoid starvation.
 				log.Printf("notice: %s: EmptyTrash: detected old race for %q, calling fixRace + Touch", v, loc)
 				v.fixRace(loc)
diff --git a/services/keepstore/s3_volume_test.go b/services/keepstore/s3_volume_test.go
index b8c4458a5..5c639d629 100644
--- a/services/keepstore/s3_volume_test.go
+++ b/services/keepstore/s3_volume_test.go
@@ -316,12 +316,12 @@ func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 			false, false, false, true, false, false,
 		},
 		{
-			"Erroneously trashed during a race, detected before TrashLifetime",
+			"Erroneously trashed during a race, detected before BlobTrashLifetime",
 			none, t0.Add(-30 * time.Minute), t0.Add(-29 * time.Minute),
 			true, false, true, true, true, false,
 		},
 		{
-			"Erroneously trashed during a race, rescue during EmptyTrash despite reaching TrashLifetime",
+			"Erroneously trashed during a race, rescue during EmptyTrash despite reaching BlobTrashLifetime",
 			none, t0.Add(-90 * time.Minute), t0.Add(-89 * time.Minute),
 			true, false, true, true, true, false,
 		},
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index 918555c2b..6504f9c16 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -418,9 +418,9 @@ func (v *UnixVolume) IndexTo(prefix string, w io.Writer) error {
 }
 
 // Trash trashes the block data from the unix storage
-// If TrashLifetime == 0, the block is deleted
+// If BlobTrashLifetime == 0, the block is deleted
 // Else, the block is renamed as path/{loc}.trash.{deadline},
-// where deadline = now + TrashLifetime
+// where deadline = now + BlobTrashLifetime
 func (v *UnixVolume) Trash(loc string) error {
 	// Touch() must be called before calling Write() on a block.  Touch()
 	// also uses lockfile().  This avoids a race condition between Write()
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index 861435502..1dea6194d 100644
--- a/services/keepstore/volume.go
+++ b/services/keepstore/volume.go
@@ -170,12 +170,12 @@ type Volume interface {
 
 	// Trash moves the block data from the underlying storage
 	// device to trash area. The block then stays in trash for
-	// -trash-lifetime interval before it is actually deleted.
+	// BlobTrashLifetime before it is actually deleted.
 	//
 	// loc is as described in Get.
 	//
 	// If the timestamp for the given locator is newer than
-	// BlobSignatureTTL, Trash must not trash the data.
+	// BlobSigningTTL, Trash must not trash the data.
 	//
 	// If a Trash operation overlaps with any Touch or Put
 	// operations on the same locator, the implementation must
@@ -196,8 +196,7 @@ type Volume interface {
 	// reliably or fail outright.
 	//
 	// Corollary: A successful Touch or Put guarantees a block
-	// will not be trashed for at least BlobSignatureTTL
-	// seconds.
+	// will not be trashed for at least BlobSigningTTL seconds.
 	Trash(loc string) error
 
 	// Untrash moves block from trash back into store
@@ -216,8 +215,8 @@ type Volume interface {
 	// secrets.
 	String() string
 
-	// EmptyTrash looks for trashed blocks that exceeded TrashLifetime
-	// and deletes them from the volume.
+	// EmptyTrash looks for trashed blocks that exceeded
+	// BlobTrashLifetime and deletes them from the volume.
 	EmptyTrash()
 
 	// Return a globally unique ID of the underlying storage
diff --git a/services/keepstore/volume_generic_test.go b/services/keepstore/volume_generic_test.go
index 683521c01..cbb0eb3a3 100644
--- a/services/keepstore/volume_generic_test.go
+++ b/services/keepstore/volume_generic_test.go
@@ -487,8 +487,8 @@ func (s *genericVolumeSuite) testDeleteNewBlock(t TB, factory TestableVolumeFact
 }
 
 // Calling Delete() for a block with a timestamp older than
-// BlobSignatureTTL seconds in the past should delete the data.
-// Test is intended for only writable volumes
+// BlobSigningTTL seconds in the past should delete the data.  Test is
+// intended for only writable volumes
 func (s *genericVolumeSuite) testDeleteOldBlock(t TB, factory TestableVolumeFactory) {
 	s.setup(t)
 	s.cluster.Collections.BlobSigningTTL.Set("5m")
@@ -834,7 +834,7 @@ func (s *genericVolumeSuite) testPutFullBlock(t TB, factory TestableVolumeFactor
 	}
 }
 
-// With TrashLifetime != 0, perform:
+// With BlobTrashLifetime != 0, perform:
 // Trash an old block - which either raises ErrNotImplemented or succeeds
 // Untrash -  which either raises ErrNotImplemented or succeeds
 // Get - which must succeed
@@ -940,7 +940,8 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	err = v.Trash(TestHash)
 	if err == MethodDisabledError || err == ErrNotImplemented {
 		// Skip the trash tests for read-only volumes, and
-		// volume types that don't support TrashLifetime>0.
+		// volume types that don't support
+		// BlobTrashLifetime>0.
 		return
 	}
 

commit 453fe7852de9b01a10ace4ded7b3b7825326e599
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Sep 17 11:33:36 2019 -0400

    13647: Improve error message.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/lib/config/deprecated_keepstore.go b/lib/config/deprecated_keepstore.go
index 43ca30a00..1e44052a6 100644
--- a/lib/config/deprecated_keepstore.go
+++ b/lib/config/deprecated_keepstore.go
@@ -143,7 +143,7 @@ func (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {
 			myURL.Host = net.JoinHostPort(hostname, (*v)[1:])
 			cluster.Services.Keepstore.InternalURLs[myURL] = arvados.ServiceInstance{}
 		} else {
-			return fmt.Errorf("unable to migrate Listen value %q from legacy keepstore config file -- remove after configuring Services.Keepstore.InternalURLs.", *v)
+			return fmt.Errorf("unable to migrate Listen value %q -- you must update Services.Keepstore.InternalURLs manually, and comment out the Listen entry in your legacy keepstore config file", *v)
 		}
 	} else {
 		for url := range cluster.Services.Keepstore.InternalURLs {
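
The manual fix the new error message points to looks roughly like the following in the cluster config file (a sketch; host and port are placeholders), plus commenting out the Listen entry in the legacy keepstore config file:

    Clusters:
      zzzzz:
        Services:
          Keepstore:
            InternalURLs:
              "http://keep0.zzzzz.example.com:25107": {}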

commit fc86370e31e1bb3a5d7b234419e29015c165bc67
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Sep 17 11:29:59 2019 -0400

    13647: Recommend using a well supported server/volume layout.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/doc/install/install-keepstore.html.textile.liquid b/doc/install/install-keepstore.html.textile.liquid
index 528d52f91..157ac589b 100644
--- a/doc/install/install-keepstore.html.textile.liquid
+++ b/doc/install/install-keepstore.html.textile.liquid
@@ -11,6 +11,15 @@ SPDX-License-Identifier: CC-BY-SA-3.0
 
 Keepstore provides access to underlying storage for reading and writing content-addressed blocks, with enforcement of Arvados permissions.  Keepstore supports a variety of cloud object storage and POSIX filesystems for its backing store.
 
+h3. Plan your storage layout
+
+In the steps below, you will configure a number of backend storage volumes (like local filesystems and S3 buckets) and specify which keepstore servers have read-only and read-write access to which volumes.
+
+It is possible to configure arbitrary server/volume layouts. However, in order to provide good performance and efficient use of storage resources, we strongly recommend using one of the following layouts:
+
+# Each volume is writable by exactly one server, and optionally readable by one or more other servers. The total capacity of all writable volumes is the same for each server.
+# Each volume is writable by all servers. Each volume has enough built-in redundancy to satisfy your requirements, i.e., you do not need Arvados to mirror data across multiple volumes.
+
 We recommend starting off with two Keepstore servers.  Exact server specifications will be site and workload specific, but in general keepstore will be I/O bound and should be set up to maximize aggregate bandwidth with compute nodes.  To increase capacity (either space or throughput) it is straightforward to add additional servers, or (in cloud environments) to increase the machine size of the existing servers.
 
 By convention, we use the following hostname pattern:
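
To make the first recommended layout above concrete, a hypothetical volume entry (UUID, host names, and driver parameters are placeholders): each volume names exactly one read-write server, and any other servers are marked read-only.

    Clusters:
      zzzzz:
        Volumes:
          zzzzz-nyw5e-000000000000000:
            AccessViaHosts:
              "http://keep0.zzzzz.example.com:25107": {}                # sole writer
              "http://keep1.zzzzz.example.com:25107": {ReadOnly: true}  # optional reader
            Driver: Directory
            DriverParameters:
              Root: /data/sdd
            Replication: 2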

-----------------------------------------------------------------------


hooks/post-receive
-- 



