[arvados] updated: 2.7.0-5894-g328d837ec0

git repository hosting git at public.arvados.org
Fri Feb 9 16:30:46 UTC 2024


Summary of changes:
 services/keep-balance/change_set.go                |   15 +-
 services/keepstore/azure_blob_volume.go            |   48 +-
 services/keepstore/azure_blob_volume_test.go       |  172 +--
 services/keepstore/bufferpool_test.go              |   11 +-
 services/keepstore/command.go                      |    6 +-
 services/keepstore/count.go                        |    6 +-
 services/keepstore/handler_test.go                 | 1405 --------------------
 services/keepstore/hashcheckwriter.go              |   68 +
 services/keepstore/keepstore.go                    |  160 ++-
 services/keepstore/keepstore_test.go               |  826 ++++++++++++
 services/keepstore/metrics_test.go                 |   87 ++
 services/keepstore/mounts_test.go                  |  175 +--
 services/keepstore/proxy_remote_test.go            |   43 +-
 services/keepstore/pull_worker.go                  |  119 +-
 services/keepstore/pull_worker_integration_test.go |  118 --
 services/keepstore/pull_worker_test.go             |  393 ++----
 services/keepstore/router.go                       |   71 +-
 services/keepstore/router_test.go                  |  403 ++++++
 services/keepstore/s3aws_volume.go                 |   32 +-
 services/keepstore/s3aws_volume_test.go            |  100 +-
 services/keepstore/status_test.go                  |   10 +-
 services/keepstore/streamwriterat.go               |   12 +-
 services/keepstore/streamwriterat_test.go          |   40 +-
 services/keepstore/trash_worker.go                 |  149 ++-
 services/keepstore/trash_worker_test.go            |  509 +++----
 services/keepstore/unix_volume.go                  |   88 +-
 services/keepstore/unix_volume_test.go             |  281 ++--
 services/keepstore/volume.go                       |    9 +-
 services/keepstore/volume_generic_test.go          |  631 +++------
 services/keepstore/volume_test.go                  |  233 +---
 services/keepstore/work_queue.go                   |  208 ---
 services/keepstore/work_queue_test.go              |  244 ----
 32 files changed, 2652 insertions(+), 4020 deletions(-)
 delete mode 100644 services/keepstore/handler_test.go
 create mode 100644 services/keepstore/hashcheckwriter.go
 create mode 100644 services/keepstore/keepstore_test.go
 create mode 100644 services/keepstore/metrics_test.go
 delete mode 100644 services/keepstore/pull_worker_integration_test.go
 create mode 100644 services/keepstore/router_test.go
 delete mode 100644 services/keepstore/work_queue.go
 delete mode 100644 services/keepstore/work_queue_test.go

       via  328d837ec0ec33b04a1298d2224c804ceb4fe91e (commit)
       via  f6a02b68697a41e95ea3a5d69a2a5b46351ac0ec (commit)
       via  99ad1527cffd776ef7f9a3d46cf3ed6b2188d010 (commit)
       via  f1c1f626326db5387c75b59708461da453054de9 (commit)
       via  5c7271dd25c43eb086382f9069b84c7b54942d63 (commit)
       via  ff875a23189a96e7d2109efdc5b030ad86002efe (commit)
       via  a99ab0bc3c6e8f75d8ca87053c300cf3c79ba1b4 (commit)
       via  3e19bd4f2c24ddeabb6a219c5995e0250d1caf7b (commit)
       via  cdda4e54d99a079773cbf68b005bdbf0a977408b (commit)
      from  bf2113bc6cd3302e326a0903a55b0222b7215a10 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.


commit 328d837ec0ec33b04a1298d2224c804ceb4fe91e
Author: Tom Clegg <tom at curii.com>
Date:   Fri Feb 9 11:30:02 2024 -0500

    2960: Update tests, continued.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 9471936bc2..d90d7a5b75 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -176,6 +176,12 @@ func (ks *keepstore) setupMounts(metrics *volumeMetricsVecs) error {
 			ks.mountsW = append(ks.mountsW, mnt)
 		}
 	}
+	// Sorting mounts by UUID makes behavior more predictable, and
+	// is convenient for testing -- for example, "index all
+	// volumes" and "trash block on all volumes" will visit
+	// volumes in predictable order.
+	sort.Slice(ks.mountsR, func(i, j int) bool { return ks.mountsR[i].UUID < ks.mountsR[j].UUID })
+	sort.Slice(ks.mountsW, func(i, j int) bool { return ks.mountsW[i].UUID < ks.mountsW[j].UUID })
 	return nil
 }
 
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
index 2d66014a62..885e9937db 100644
--- a/services/keepstore/keepstore_test.go
+++ b/services/keepstore/keepstore_test.go
@@ -329,11 +329,6 @@ func (s *keepstoreSuite) TestBlockTouch(c *C) {
 	c.Fatal("todo")
 }
 
-func (s *keepstoreSuite) TestIndex(c *C) {
-	c.Fatal("todo: entire index")
-	c.Fatal("todo: specified prefix")
-}
-
 func (s *keepstoreSuite) TestBlockTrash(c *C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
@@ -489,7 +484,6 @@ func (s *keepstoreSuite) TestBufferPoolLeak(c *C) {
 }
 
 func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
-	c.Error("todo: volume with no specified classes implies 'default'")
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"}, // "default" is implicit
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"special": true, "extra": true}},
@@ -500,26 +494,36 @@ func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
 	ctx := authContext(arvadostest.ActiveTokenV2)
 
 	for _, trial := range []struct {
-		ask    []string
-		expect map[string]int
+		ask            []string
+		expectReplicas int
+		expectClasses  map[string]int
 	}{
 		{nil,
+			1,
 			map[string]int{"default": 1}},
 		{[]string{},
+			1,
 			map[string]int{"default": 1}},
 		{[]string{"default"},
+			1,
 			map[string]int{"default": 1}},
 		{[]string{"default", "default"},
+			1,
 			map[string]int{"default": 1}},
 		{[]string{"special"},
+			1,
 			map[string]int{"extra": 1, "special": 1}},
 		{[]string{"special", "readonly"},
+			1,
 			map[string]int{"extra": 1, "special": 1}},
 		{[]string{"special", "nonexistent"},
+			1,
 			map[string]int{"extra": 1, "special": 1}},
 		{[]string{"extra", "special"},
+			1,
 			map[string]int{"extra": 1, "special": 1}},
 		{[]string{"default", "special"},
+			2,
 			map[string]int{"default": 1, "extra": 1, "special": 1}},
 	} {
 		c.Logf("success case %#v", trial)
@@ -531,12 +535,12 @@ func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
 		if !c.Check(err, IsNil) {
 			continue
 		}
-		c.Check(resp.Replicas, Equals, 1)
-		if len(trial.expect) == 0 {
+		c.Check(resp.Replicas, Equals, trial.expectReplicas)
+		if len(trial.expectClasses) == 0 {
 			// any non-empty value is correct
 			c.Check(resp.StorageClasses, Not(HasLen), 0)
 		} else {
-			c.Check(resp.StorageClasses, DeepEquals, trial.expect)
+			c.Check(resp.StorageClasses, DeepEquals, trial.expectClasses)
 		}
 	}
 
@@ -555,11 +559,6 @@ func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
 	}
 }
 
-func (s *keepstoreSuite) TestUntrashHandler(c *C) {
-	c.Check("resp", Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
-	c.Error("todo")
-}
-
 func (s *keepstoreSuite) TestUntrashHandlerWithNoWritableVolumes(c *C) {
 	for uuid, v := range s.cluster.Volumes {
 		v.ReadOnly = true
@@ -584,12 +583,24 @@ func (s *keepstoreSuite) TestUntrashHandlerWithNoWritableVolumes(c *C) {
 	}
 }
 
-func (s *keepstoreSuite) TestBlockWrite_SkipReadonly(c *C) {
-	c.Fatal("todo")
-}
+func (s *keepstoreSuite) TestBlockWrite_SkipReadOnly(c *C) {
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", ReadOnly: true},
+		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+	}
+	ks, cancel := testKeepstore(c, s.cluster, nil)
+	defer cancel()
+	ctx := authContext(arvadostest.ActiveTokenV2)
 
-func (s *keepstoreSuite) TestBlockRead_VolumeError503(c *C) {
-	c.Fatal("todo: return 503 ")
+	for i := range make([]byte, 32) {
+		data := []byte(fmt.Sprintf("block %d", i))
+		_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{Data: data})
+		c.Assert(err, IsNil)
+	}
+	c.Check(ks.mounts["zzzzz-nyw5e-000000000000000"].volume.(*stubVolume).stubLog.String(), Matches, "(?ms).*write.*")
+	c.Check(ks.mounts["zzzzz-nyw5e-111111111111111"].volume.(*stubVolume).stubLog.String(), HasLen, 0)
+	c.Check(ks.mounts["zzzzz-nyw5e-222222222222222"].volume.(*stubVolume).stubLog.String(), HasLen, 0)
 }
 
 func init() {
diff --git a/services/keepstore/metrics_test.go b/services/keepstore/metrics_test.go
new file mode 100644
index 0000000000..0c8f1e68e6
--- /dev/null
+++ b/services/keepstore/metrics_test.go
@@ -0,0 +1,87 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+	"context"
+	"encoding/json"
+	"net/http"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	"git.arvados.org/arvados.git/sdk/go/httpserver"
+	"github.com/prometheus/client_golang/prometheus"
+	. "gopkg.in/check.v1"
+)
+
+func (s *routerSuite) TestMetrics(c *C) {
+	reg := prometheus.NewRegistry()
+	router, cancel := testRouter(c, s.cluster, reg)
+	defer cancel()
+	instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
+	handler := instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
+
+	router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+		Hash: fooHash,
+		Data: []byte("foo"),
+	})
+	router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+		Hash: barHash,
+		Data: []byte("bar"),
+	})
+
+	// prime the metrics by doing a no-op request
+	resp := call(handler, "GET", "/", "", nil, nil)
+
+	resp = call(handler, "GET", "/metrics.json", "", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
+	resp = call(handler, "GET", "/metrics.json", "foobar", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusForbidden)
+	resp = call(handler, "GET", "/metrics.json", arvadostest.ManagementToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	var j []struct {
+		Name   string
+		Help   string
+		Type   string
+		Metric []struct {
+			Label []struct {
+				Name  string
+				Value string
+			}
+			Summary struct {
+				SampleCount string
+				SampleSum   float64
+			}
+		}
+	}
+	json.NewDecoder(resp.Body).Decode(&j)
+	found := make(map[string]bool)
+	names := map[string]bool{}
+	for _, g := range j {
+		names[g.Name] = true
+		for _, m := range g.Metric {
+			if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
+				c.Check(m.Summary.SampleCount, Equals, "2")
+				found[g.Name] = true
+			}
+		}
+	}
+
+	metricsNames := []string{
+		"arvados_keepstore_bufferpool_inuse_buffers",
+		"arvados_keepstore_bufferpool_max_buffers",
+		"arvados_keepstore_bufferpool_allocated_bytes",
+		"arvados_keepstore_pull_queue_inprogress_entries",
+		"arvados_keepstore_pull_queue_pending_entries",
+		"arvados_keepstore_trash_queue_inprogress_entries",
+		"arvados_keepstore_trash_queue_pending_entries",
+		"request_duration_seconds",
+	}
+	for _, m := range metricsNames {
+		_, ok := names[m]
+		c.Check(ok, Equals, true, Commentf("checking metric %q", m))
+	}
+}
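
For reference, a minimal standalone sketch (not part of this commit) of where
gauge values like the ones checked above come from: a GaugeFunc registered on
a prometheus.Registry can also be read back directly with Gather(), without
going through the /metrics.json endpoint.

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    )

    func main() {
    	var inprogress int64 = 3 // stand-in for a counter like puller.inprogress
    	reg := prometheus.NewRegistry()
    	reg.MustRegister(prometheus.NewGaugeFunc(
    		prometheus.GaugeOpts{
    			Namespace: "arvados",
    			Subsystem: "keepstore",
    			Name:      "pull_queue_inprogress_entries",
    			Help:      "Number of pull requests in progress",
    		},
    		func() float64 { return float64(inprogress) },
    	))
    	mfs, err := reg.Gather()
    	if err != nil {
    		panic(err)
    	}
    	for _, mf := range mfs {
    		// prints: arvados_keepstore_pull_queue_inprogress_entries 3
    		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetGauge().GetValue())
    	}
    }
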
diff --git a/services/keepstore/mounts_test.go b/services/keepstore/mounts_test.go
index 71b83b6111..d29d5f6dc0 100644
--- a/services/keepstore/mounts_test.go
+++ b/services/keepstore/mounts_test.go
@@ -9,11 +9,6 @@ import (
 	"encoding/json"
 	"net/http"
 
-	"git.arvados.org/arvados.git/sdk/go/arvados"
-	"git.arvados.org/arvados.git/sdk/go/arvadostest"
-	"git.arvados.org/arvados.git/sdk/go/ctxlog"
-	"git.arvados.org/arvados.git/sdk/go/httpserver"
-	"github.com/prometheus/client_golang/prometheus"
 	. "gopkg.in/check.v1"
 )
 
@@ -88,68 +83,3 @@ func (s *routerSuite) TestMounts(c *C) {
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Body.String(), Equals, "\n")
 }
-
-func (s *routerSuite) TestMetrics(c *C) {
-	reg := prometheus.NewRegistry()
-	router, cancel := testRouter(c, s.cluster, reg)
-	defer cancel()
-	instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
-	handler := instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
-
-	router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
-		Hash: fooHash,
-		Data: []byte("foo"),
-	})
-	router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
-		Hash: barHash,
-		Data: []byte("bar"),
-	})
-	resp := call(handler, "GET", "/metrics.json", "", nil, nil)
-	c.Check(resp.Code, Equals, http.StatusUnauthorized)
-	resp = call(handler, "GET", "/metrics.json", "foobar", nil, nil)
-	c.Check(resp.Code, Equals, http.StatusForbidden)
-	resp = call(handler, "GET", "/metrics.json", arvadostest.ManagementToken, nil, nil)
-	c.Check(resp.Code, Equals, http.StatusOK)
-	var j []struct {
-		Name   string
-		Help   string
-		Type   string
-		Metric []struct {
-			Label []struct {
-				Name  string
-				Value string
-			}
-			Summary struct {
-				SampleCount string
-				SampleSum   float64
-			}
-		}
-	}
-	json.NewDecoder(resp.Body).Decode(&j)
-	found := make(map[string]bool)
-	names := map[string]bool{}
-	for _, g := range j {
-		names[g.Name] = true
-		for _, m := range g.Metric {
-			if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
-				c.Check(m.Summary.SampleCount, Equals, "2")
-				found[g.Name] = true
-			}
-		}
-	}
-
-	metricsNames := []string{
-		"arvados_keepstore_bufferpool_inuse_buffers",
-		"arvados_keepstore_bufferpool_max_buffers",
-		"arvados_keepstore_bufferpool_allocated_bytes",
-		"arvados_keepstore_pull_queue_inprogress_entries",
-		"arvados_keepstore_pull_queue_pending_entries",
-		"arvados_keepstore_trash_queue_inprogress_entries",
-		"arvados_keepstore_trash_queue_pending_entries",
-		"request_duration_seconds",
-	}
-	for _, m := range metricsNames {
-		_, ok := names[m]
-		c.Check(ok, Equals, true, Commentf("checking metric %q", m))
-	}
-}
diff --git a/services/keepstore/proxy_remote_test.go b/services/keepstore/proxy_remote_test.go
index 2d1335afbf..8479b5a4b4 100644
--- a/services/keepstore/proxy_remote_test.go
+++ b/services/keepstore/proxy_remote_test.go
@@ -19,13 +19,16 @@ import (
 	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/arvadostest"
 	"git.arvados.org/arvados.git/sdk/go/auth"
+	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	"git.arvados.org/arvados.git/sdk/go/httpserver"
 	"git.arvados.org/arvados.git/sdk/go/keepclient"
+	"github.com/prometheus/client_golang/prometheus"
 	check "gopkg.in/check.v1"
 )
 
-var _ = check.Suite(&ProxyRemoteSuite{})
+var _ = check.Suite(&proxyRemoteSuite{})
 
-type ProxyRemoteSuite struct {
+type proxyRemoteSuite struct {
 	cluster *arvados.Cluster
 	handler *router
 
@@ -38,7 +41,7 @@ type ProxyRemoteSuite struct {
 	remoteAPI            *httptest.Server
 }
 
-func (s *ProxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http.Request) {
+func (s *proxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http.Request) {
 	expectToken, err := auth.SaltToken(arvadostest.ActiveTokenV2, s.remoteClusterID)
 	if err != nil {
 		panic(err)
@@ -55,7 +58,7 @@ func (s *ProxyRemoteSuite) remoteKeepproxyHandler(w http.ResponseWriter, r *http
 	http.Error(w, "404", 404)
 }
 
-func (s *ProxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Request) {
+func (s *proxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Request) {
 	host, port, _ := net.SplitHostPort(strings.Split(s.remoteKeepproxy.URL, "//")[1])
 	portnum, _ := strconv.Atoi(port)
 	if r.URL.Path == "/arvados/v1/discovery/v1/rest" {
@@ -79,10 +82,10 @@ func (s *ProxyRemoteSuite) remoteAPIHandler(w http.ResponseWriter, r *http.Reque
 	http.Error(w, "404", 404)
 }
 
-func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
+func (s *proxyRemoteSuite) SetUpTest(c *check.C) {
 	s.remoteClusterID = "z0000"
 	s.remoteBlobSigningKey = []byte("3b6df6fb6518afe12922a5bc8e67bf180a358bc8")
-	s.remoteKeepproxy = httptest.NewServer(http.HandlerFunc(s.remoteKeepproxyHandler))
+	s.remoteKeepproxy = httptest.NewServer(httpserver.LogRequests(http.HandlerFunc(s.remoteKeepproxyHandler)))
 	s.remoteAPI = httptest.NewUnstartedServer(http.HandlerFunc(s.remoteAPIHandler))
 	s.remoteAPI.StartTLS()
 	s.cluster = testCluster(c)
@@ -97,14 +100,17 @@ func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
 	s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "stub"}}
 }
 
-func (s *ProxyRemoteSuite) TearDownTest(c *check.C) {
+func (s *proxyRemoteSuite) TearDownTest(c *check.C) {
 	s.remoteAPI.Close()
 	s.remoteKeepproxy.Close()
 }
 
-func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
-	router, cancel := testRouter(c, s.cluster, nil)
+func (s *proxyRemoteSuite) TestProxyRemote(c *check.C) {
+	reg := prometheus.NewRegistry()
+	router, cancel := testRouter(c, s.cluster, reg)
 	defer cancel()
+	instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
+	handler := httpserver.LogRequests(instrumented.ServeAPI(s.cluster.ManagementToken, instrumented))
 
 	data := []byte("foo bar")
 	s.remoteKeepData = data
@@ -181,9 +187,11 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
 			req.Header.Set("X-Keep-Signature", trial.xKeepSignature)
 		}
 		resp = httptest.NewRecorder()
-		router.ServeHTTP(resp, req)
+		handler.ServeHTTP(resp, req)
 		c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
-		c.Check(resp.Code, check.Equals, trial.expectCode)
+		if !c.Check(resp.Code, check.Equals, trial.expectCode) {
+			c.Logf("resp.Code %d came with resp.Body %q", resp.Code, resp.Body.String())
+		}
 		if resp.Code == http.StatusOK {
 			c.Check(resp.Body.String(), check.Equals, string(data))
 		} else {
@@ -206,7 +214,7 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
 		req = httptest.NewRequest("GET", "/"+locHdr, nil)
 		req.Header.Set("Authorization", "Bearer "+trial.token)
 		resp = httptest.NewRecorder()
-		router.ServeHTTP(resp, req)
+		handler.ServeHTTP(resp, req)
 		c.Check(resp.Code, check.Equals, http.StatusOK)
 		c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
 	}
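
The test now sends requests through instrumentation and request-logging
wrappers rather than calling the router directly. A rough sketch of the
general wrap-an-http.Handler idea, using plain net/http rather than the
Arvados httpserver helpers:

    package main

    import (
    	"io"
    	"log"
    	"net/http"
    	"net/http/httptest"
    )

    // logRequests wraps next so every request is logged before it is served,
    // the same shape as the LogRequests/Instrument wrappers used above.
    func logRequests(logger *log.Logger, next http.Handler) http.Handler {
    	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		logger.Printf("%s %s", r.Method, r.URL.Path)
    		next.ServeHTTP(w, r)
    	})
    }

    func main() {
    	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
    		io.WriteString(w, "ok")
    	})
    	srv := httptest.NewServer(logRequests(log.Default(), inner))
    	defer srv.Close()
    	resp, err := http.Get(srv.URL + "/ping") // logs "GET /ping" before inner runs
    	if err != nil {
    		log.Fatal(err)
    	}
    	resp.Body.Close()
    }
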
diff --git a/services/keepstore/pull_worker.go b/services/keepstore/pull_worker.go
index 18379dc0cb..c131de02cb 100644
--- a/services/keepstore/pull_worker.go
+++ b/services/keepstore/pull_worker.go
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"context"
 	"sync"
+	"sync/atomic"
 
 	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/arvadosclient"
@@ -22,13 +23,10 @@ type PullListItem struct {
 }
 
 type puller struct {
-	keepstore *keepstore
-	todo      []PullListItem
-	cond      *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
-
-	// For the benefit of test cases: if this channel is non-nil,
-	// send len(todo) to it after processing each pull list item.
-	notifyTodoLen chan int
+	keepstore  *keepstore
+	todo       []PullListItem
+	cond       *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+	inprogress atomic.Int64
 }
 
 func newPuller(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *puller {
@@ -49,6 +47,17 @@ func newPuller(ctx context.Context, keepstore *keepstore, reg *prometheus.Regist
 			return float64(len(p.todo))
 		},
 	))
+	reg.MustRegister(prometheus.NewGaugeFunc(
+		prometheus.GaugeOpts{
+			Namespace: "arvados",
+			Subsystem: "keepstore",
+			Name:      "pull_queue_inprogress_entries",
+			Help:      "Number of pull requests in progress",
+		},
+		func() float64 {
+			return float64(p.inprogress.Load())
+		},
+	))
 	if len(p.keepstore.mountsW) == 0 {
 		keepstore.logger.Infof("not running pull worker because there are no writable volumes")
 		return p
@@ -76,6 +85,7 @@ func (p *puller) runWorker(ctx context.Context) {
 		p.keepstore.logger.Errorf("error setting up pull worker: %s", err)
 		return
 	}
+	c.AuthToken = "keepstore-token-used-for-pulling-data-from-same-cluster"
 	ac, err := arvadosclient.New(c)
 	if err != nil {
 		p.keepstore.logger.Errorf("error setting up pull worker: %s", err)
@@ -94,56 +104,65 @@ func (p *puller) runWorker(ctx context.Context) {
 	}()
 	for {
 		p.cond.L.Lock()
-		if len(p.todo) == 0 {
-			for len(p.todo) == 0 && ctx.Err() == nil {
-				p.cond.Wait()
-			}
-			if p.notifyTodoLen != nil {
-				p.notifyTodoLen <- len(p.todo)
-			}
+		for len(p.todo) == 0 && ctx.Err() == nil {
+			p.cond.Wait()
 		}
 		if ctx.Err() != nil {
 			return
 		}
 		item := p.todo[0]
 		p.todo = p.todo[1:]
+		p.inprogress.Add(1)
 		p.cond.L.Unlock()
 
-		var dst *mount
-		if item.MountUUID != "" {
-			dst = p.keepstore.mounts[item.MountUUID]
-			if dst == nil {
-				p.keepstore.logger.Warnf("ignoring pull list entry for nonexistent mount: %v", item)
-				continue
-			} else if !dst.AllowWrite {
-				p.keepstore.logger.Warnf("ignoring pull list entry for readonly mount: %v", item)
-				continue
+		func() {
+			defer p.inprogress.Add(-1)
+
+			logger := p.keepstore.logger.WithField("locator", item.Locator)
+
+			li, err := parseLocator(item.Locator)
+			if err != nil {
+				logger.Warn("ignoring pull request for invalid locator")
+				return
 			}
-		} else {
-			dst = p.keepstore.rendezvous(item.Locator, p.keepstore.mountsW)[0]
-		}
 
-		serviceRoots := make(map[string]string)
-		for _, addr := range item.Servers {
-			serviceRoots[addr] = addr
-		}
-		keepClient.SetServiceRoots(serviceRoots, nil, nil)
+			var dst *mount
+			if item.MountUUID != "" {
+				dst = p.keepstore.mounts[item.MountUUID]
+				if dst == nil {
+					logger.Warnf("ignoring pull list entry for nonexistent mount %s", item.MountUUID)
+					return
+				} else if !dst.AllowWrite {
+					logger.Warnf("ignoring pull list entry for readonly mount %s", item.MountUUID)
+					return
+				}
+			} else {
+				dst = p.keepstore.rendezvous(item.Locator, p.keepstore.mountsW)[0]
+			}
 
-		signedLocator := p.keepstore.signLocator(c.AuthToken, item.Locator)
+			serviceRoots := make(map[string]string)
+			for _, addr := range item.Servers {
+				serviceRoots[addr] = addr
+			}
+			keepClient.SetServiceRoots(serviceRoots, nil, nil)
 
-		buf := bytes.NewBuffer(nil)
-		_, err := keepClient.BlockRead(ctx, arvados.BlockReadOptions{
-			Locator: signedLocator,
-			WriteTo: buf,
-		})
-		if err != nil {
-			p.keepstore.logger.Warnf("error pulling data for pull list entry (%v): %s", item, err)
-			continue
-		}
-		err = dst.BlockWrite(ctx, item.Locator, buf.Bytes())
-		if err != nil {
-			p.keepstore.logger.Warnf("error writing data for pull list entry (%v): %s", item, err)
-			continue
-		}
+			signedLocator := p.keepstore.signLocator(c.AuthToken, item.Locator)
+
+			buf := bytes.NewBuffer(nil)
+			_, err = keepClient.BlockRead(ctx, arvados.BlockReadOptions{
+				Locator: signedLocator,
+				WriteTo: buf,
+			})
+			if err != nil {
+				logger.WithError(err).Warnf("error pulling data from remote servers (%s)", item.Servers)
+				return
+			}
+			err = dst.BlockWrite(ctx, li.hash, buf.Bytes())
+			if err != nil {
+				logger.WithError(err).Warnf("error writing data to %s", dst.UUID)
+				return
+			}
+			logger.Info("block pulled")
+		}()
 	}
 }
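
This change drops the test-only notifyTodoLen channel in favor of an atomic
in-progress counter that is decremented via defer and exported as a gauge. A
simplified, generic sketch of that worker-loop pattern (illustrative only,
not the keepstore code):

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    	"time"
    )

    type worker struct {
    	todo       []string
    	cond       *sync.Cond   // guards todo; Broadcast when todo grows
    	inprogress atomic.Int64 // exported via a prometheus GaugeFunc in keepstore
    }

    func (w *worker) run() {
    	for {
    		w.cond.L.Lock()
    		for len(w.todo) == 0 {
    			w.cond.Wait()
    		}
    		item := w.todo[0]
    		w.todo = w.todo[1:]
    		w.inprogress.Add(1)
    		w.cond.L.Unlock()

    		func() {
    			// defer keeps the counter accurate even if the work
    			// returns early.
    			defer w.inprogress.Add(-1)
    			fmt.Println("processing", item)
    			time.Sleep(10 * time.Millisecond)
    		}()
    	}
    }

    func main() {
    	w := &worker{cond: sync.NewCond(&sync.Mutex{})}
    	go w.run()
    	w.cond.L.Lock()
    	w.todo = append(w.todo, "a", "b", "c")
    	w.cond.L.Unlock()
    	w.cond.Broadcast()
    	time.Sleep(100 * time.Millisecond)
    }
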
diff --git a/services/keepstore/pull_worker_test.go b/services/keepstore/pull_worker_test.go
index befe81096c..d109b56df3 100644
--- a/services/keepstore/pull_worker_test.go
+++ b/services/keepstore/pull_worker_test.go
@@ -5,26 +5,132 @@
 package keepstore
 
 import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"sort"
+	"time"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"github.com/sirupsen/logrus"
 	. "gopkg.in/check.v1"
 )
 
-func (s *routerSuite) TestPullList_Clear(c *C) {
-	router, cancel := testRouter(c, s.cluster, nil)
+func (s *routerSuite) TestPullList_Execute(c *C) {
+	remotecluster := testCluster(c)
+	remotecluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-rrrrrrrrrrrrrrr": {Replication: 1, Driver: "stub"},
+	}
+	remoterouter, cancel := testRouter(c, remotecluster, nil)
 	defer cancel()
+	remoteserver := httptest.NewServer(remoterouter)
+	defer remoteserver.Close()
 
-	c.Fatal("todo")
-	router.ServeHTTP(nil, nil)
-}
-
-func (s *routerSuite) TestPullList_Execute(c *C) {
 	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
-	c.Fatal("todo: pull available block to unspecified volume")
-	c.Fatal("todo: pull available block to specified volume")
-	c.Fatal("todo: log error when block not found on remote")
-	c.Fatal("todo: log error connecting to remote")
-	c.Fatal("todo: log error writing block to local mount")
-	c.Fatal("todo: log error when destination mount does not exist")
-	router.ServeHTTP(nil, nil)
+	executePullList := func(pullList []PullListItem) string {
+		var logbuf bytes.Buffer
+		logger := logrus.New()
+		logger.Out = &logbuf
+		router.keepstore.logger = logger
+
+		listjson, err := json.Marshal(pullList)
+		c.Assert(err, IsNil)
+		resp := call(router, "PUT", "http://example/pull", s.cluster.SystemRootToken, listjson, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		for {
+			router.puller.cond.L.Lock()
+			todolen := len(router.puller.todo)
+			router.puller.cond.L.Unlock()
+			if todolen == 0 && router.puller.inprogress.Load() == 0 {
+				break
+			}
+			time.Sleep(time.Millisecond)
+		}
+		return logbuf.String()
+	}
+
+	newRemoteBlock := func(datastring string) string {
+		data := []byte(datastring)
+		hash := fmt.Sprintf("%x", md5.Sum(data))
+		locator := fmt.Sprintf("%s+%d", hash, len(data))
+		_, err := remoterouter.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+			Hash: hash,
+			Data: data,
+		})
+		c.Assert(err, IsNil)
+		return locator
+	}
+
+	mounts := append([]*mount(nil), router.keepstore.mountsR...)
+	sort.Slice(mounts, func(i, j int) bool { return mounts[i].UUID < mounts[j].UUID })
+	var vols []*stubVolume
+	for _, mount := range mounts {
+		vols = append(vols, mount.volume.(*stubVolume))
+	}
+
+	ctx := authContext(arvadostest.ActiveTokenV2)
+
+	locator := newRemoteBlock("pull available block to unspecified volume")
+	executePullList([]PullListItem{{
+		Locator: locator,
+		Servers: []string{remoteserver.URL}}})
+	_, err := router.keepstore.BlockRead(ctx, arvados.BlockReadOptions{
+		Locator: router.keepstore.signLocator(arvadostest.ActiveTokenV2, locator),
+		WriteTo: io.Discard})
+	c.Check(err, IsNil)
+
+	locator0 := newRemoteBlock("pull available block to specified volume 0")
+	locator1 := newRemoteBlock("pull available block to specified volume 1")
+	executePullList([]PullListItem{
+		{
+			Locator:   locator0,
+			Servers:   []string{remoteserver.URL},
+			MountUUID: vols[0].params.UUID},
+		{
+			Locator:   locator1,
+			Servers:   []string{remoteserver.URL},
+			MountUUID: vols[1].params.UUID}})
+	c.Check(vols[0].data[locator0[:32]].data, NotNil)
+	c.Check(vols[1].data[locator1[:32]].data, NotNil)
+
+	locator = fooHash + "+3"
+	logs := executePullList([]PullListItem{{
+		Locator: locator,
+		Servers: []string{remoteserver.URL}}})
+	c.Check(logs, Matches, ".*error pulling data from remote servers.*Block not found.*locator=acbd.*\n")
+
+	locator = fooHash + "+3"
+	logs = executePullList([]PullListItem{{
+		Locator: locator,
+		Servers: []string{"http://0.0.0.0:9/"}}})
+	c.Check(logs, Matches, ".*error pulling data from remote servers.*connection refused.*locator=acbd.*\n")
+
+	locator = newRemoteBlock("log error writing to local volume")
+	vols[0].blockWrite = func(context.Context, string, []byte) error { return errors.New("test error") }
+	vols[1].blockWrite = vols[0].blockWrite
+	logs = executePullList([]PullListItem{{
+		Locator: locator,
+		Servers: []string{remoteserver.URL}}})
+	c.Check(logs, Matches, ".*error writing data to zzzzz-nyw5e-.*error=\"test error\".*locator=.*\n")
+	vols[0].blockWrite = nil
+	vols[1].blockWrite = nil
+
+	locator = newRemoteBlock("log error when destination mount does not exist")
+	logs = executePullList([]PullListItem{{
+		Locator:   locator,
+		Servers:   []string{remoteserver.URL},
+		MountUUID: "bogus-mount-uuid"}})
+	c.Check(logs, Matches, ".*ignoring pull list entry for nonexistent mount bogus-mount-uuid.*locator=.*\n")
+
+	logs = executePullList([]PullListItem{})
+	c.Logf("%s", logs)
 }
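
With notifyTodoLen gone, the test waits for the puller to go idle by polling
the todo length and the in-progress counter. A generic sketch of such a
wait-until-idle helper; the timeout is an illustrative addition, the test
above polls without one:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    	"time"
    )

    // waitIdle polls until the queue is drained and nothing is in progress,
    // or gives up after the timeout.
    func waitIdle(mu sync.Locker, todo *[]string, inprogress *atomic.Int64, timeout time.Duration) bool {
    	deadline := time.Now().Add(timeout)
    	for time.Now().Before(deadline) {
    		mu.Lock()
    		n := len(*todo)
    		mu.Unlock()
    		if n == 0 && inprogress.Load() == 0 {
    			return true
    		}
    		time.Sleep(time.Millisecond)
    	}
    	return false
    }

    func main() {
    	var mu sync.Mutex
    	var todo []string
    	var inprogress atomic.Int64
    	fmt.Println(waitIdle(&mu, &todo, &inprogress, time.Second)) // true: already idle
    }
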
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
index 8be9b8a113..d06e5088ac 100644
--- a/services/keepstore/router_test.go
+++ b/services/keepstore/router_test.go
@@ -25,6 +25,15 @@ import (
 	. "gopkg.in/check.v1"
 )
 
+// routerSuite tests that the router correctly translates HTTP
+// requests to the appropriate keepstore functionality, and translates
+// the results to HTTP responses.
+type routerSuite struct {
+	cluster *arvados.Cluster
+}
+
+var _ = Suite(&routerSuite{})
+
 func testRouter(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*router, context.CancelFunc) {
 	if reg == nil {
 		reg = prometheus.NewRegistry()
@@ -40,12 +49,6 @@ func testRouter(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*rout
 	return newRouter(ks, puller, trasher).(*router), cancel
 }
 
-var _ = Suite(&routerSuite{})
-
-type routerSuite struct {
-	cluster *arvados.Cluster
-}
-
 func (s *routerSuite) SetUpTest(c *C) {
 	s.cluster = testCluster(c)
 	s.cluster.Volumes = map[string]arvados.Volume{
@@ -249,9 +252,110 @@ func (s *routerSuite) TestVolumeErrorStatusCode(c *C) {
 	router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(_ context.Context, hash string, w io.Writer) (int, error) {
 		return 0, httpserver.ErrorWithStatus(errors.New("test error"), http.StatusBadGateway)
 	}
-	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+
+	// To test whether we fall back to volume 1 after volume 0
+	// returns an error, we need to use a block whose rendezvous
+	// order has volume 0 first. Luckily "bar" is such a block.
+	c.Assert(router.keepstore.rendezvous(barHash, router.keepstore.mountsR)[0].UUID, DeepEquals, router.keepstore.mountsR[0].UUID)
+
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, barHash+"+3")
+
+	// Volume 0 fails with an error that specifies an HTTP status
+	// code, so that code should be propagated to caller.
 	resp := call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusBadGateway)
+	c.Check(resp.Body.String(), Equals, "test error\n")
+
+	c.Assert(router.keepstore.mountsW[1].volume.BlockWrite(context.Background(), barHash, []byte("bar")), IsNil)
+
+	// If the requested block is available on the second volume,
+	// it doesn't matter that the first volume failed.
+	resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "bar")
+}
+
+func (s *routerSuite) TestIndex(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	resp := call(router, "GET", "http://example/index", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "\n")
+
+	resp = call(router, "GET", "http://example/index?prefix=fff", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "\n")
+
+	t0 := time.Now().Add(-time.Hour)
+	vol0 := router.keepstore.mounts["zzzzz-nyw5e-000000000000000"].volume.(*stubVolume)
+	err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	err = vol0.blockTouchWithTime(fooHash, t0)
+	c.Assert(err, IsNil)
+	err = vol0.BlockWrite(context.Background(), barHash, []byte("bar"))
+	c.Assert(err, IsNil)
+	err = vol0.blockTouchWithTime(barHash, t0)
+	c.Assert(err, IsNil)
+	t1 := time.Now().Add(-time.Minute)
+	vol1 := router.keepstore.mounts["zzzzz-nyw5e-111111111111111"].volume.(*stubVolume)
+	err = vol1.BlockWrite(context.Background(), barHash, []byte("bar"))
+	c.Assert(err, IsNil)
+	err = vol1.blockTouchWithTime(barHash, t1)
+	c.Assert(err, IsNil)
+
+	for _, path := range []string{
+		"/index?prefix=acb",
+		"/index/acb",
+		"/index/?prefix=acb",
+		"/mounts/zzzzz-nyw5e-000000000000000/blocks?prefix=acb",
+		"/mounts/zzzzz-nyw5e-000000000000000/blocks/?prefix=acb",
+		"/mounts/zzzzz-nyw5e-000000000000000/blocks/acb",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(resp.Body.String(), Equals, fooHash+"+3 "+fmt.Sprintf("%d", t0.UnixNano())+"\n\n")
+	}
+
+	for _, path := range []string{
+		"/index?prefix=37",
+		"/index/37",
+		"/index/?prefix=37",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(resp.Body.String(), Equals, ""+
+			barHash+"+3 "+fmt.Sprintf("%d", t0.UnixNano())+"\n"+
+			barHash+"+3 "+fmt.Sprintf("%d", t1.UnixNano())+"\n\n")
+	}
+
+	for _, path := range []string{
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks/",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks?prefix=37",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks/?prefix=37",
+		"/mounts/zzzzz-nyw5e-111111111111111/blocks/37",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(resp.Body.String(), Equals, barHash+"+3 "+fmt.Sprintf("%d", t1.UnixNano())+"\n\n")
+	}
+
+	for _, path := range []string{
+		"/index",
+		"/index?prefix=",
+		"/index/",
+		"/index/?prefix=",
+	} {
+		c.Logf("=== %s", path)
+		resp = call(router, "GET", "http://example"+path, s.cluster.SystemRootToken, nil, nil)
+		c.Check(resp.Code, Equals, http.StatusOK)
+		c.Check(strings.Split(resp.Body.String(), "\n"), HasLen, 5)
+	}
+
 }
 
 // Check that the context passed to a volume method gets cancelled
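
The TestIndex assertions above imply that each index entry is "<locator>
<mtime in nanoseconds>" and that the listing ends with an empty line. A small
sketch of formatting and parsing one entry under that assumption:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    	"time"
    )

    func main() {
    	// Format one index entry the way the assertions above expect.
    	mtime := time.Now().Add(-time.Hour)
    	entry := fmt.Sprintf("%s+%d %d\n", "acbd18db4cc2f85cedef654fccc4a4d8", 3, mtime.UnixNano())

    	// Parse it back.
    	fields := strings.Fields(entry)
    	locator := fields[0]
    	nanos, err := strconv.ParseInt(fields[1], 10, 64)
    	if err != nil {
    		panic(err)
    	}
    	// Prints the locator and "true" (the parsed mtime matches the original).
    	fmt.Println(locator, time.Unix(0, nanos).Equal(mtime))
    }
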
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index 8da0a6ab52..d704c3a7d5 100644
--- a/services/keepstore/trash_worker.go
+++ b/services/keepstore/trash_worker.go
@@ -7,6 +7,7 @@ package keepstore
 import (
 	"context"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -20,13 +21,10 @@ type TrashListItem struct {
 }
 
 type trasher struct {
-	keepstore *keepstore
-	todo      []TrashListItem
-	cond      *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
-
-	// For the benefit of test cases: if this channel is non-nil,
-	// send len(todo) to it after processing each trash list item.
-	notifyTodoLen chan int
+	keepstore  *keepstore
+	todo       []TrashListItem
+	cond       *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+	inprogress atomic.Int64
 }
 
 func newTrasher(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *trasher {
@@ -47,6 +45,17 @@ func newTrasher(ctx context.Context, keepstore *keepstore, reg *prometheus.Regis
 			return float64(len(t.todo))
 		},
 	))
+	reg.MustRegister(prometheus.NewGaugeFunc(
+		prometheus.GaugeOpts{
+			Namespace: "arvados",
+			Subsystem: "keepstore",
+			Name:      "trash_queue_inprogress_entries",
+			Help:      "Number of trash requests in progress",
+		},
+		func() float64 {
+			return float64(t.inprogress.Load())
+		},
+	))
 	if !keepstore.cluster.Collections.BlobTrash {
 		keepstore.logger.Info("not running trash worker because Collections.BlobTrash == false")
 		return t
@@ -82,16 +91,8 @@ func (t *trasher) runWorker(ctx context.Context, mntsAllowTrash []*mount) {
 	}()
 	for {
 		t.cond.L.Lock()
-		if t.notifyTodoLen != nil {
-			t.notifyTodoLen <- len(t.todo)
-		}
-		if len(t.todo) == 0 {
-			for len(t.todo) == 0 && ctx.Err() == nil {
-				t.cond.Wait()
-			}
-			if t.notifyTodoLen != nil {
-				t.notifyTodoLen <- len(t.todo)
-			}
+		for len(t.todo) == 0 && ctx.Err() == nil {
+			t.cond.Wait()
 		}
 		if ctx.Err() != nil {
 			t.cond.L.Unlock()
@@ -99,57 +100,61 @@ func (t *trasher) runWorker(ctx context.Context, mntsAllowTrash []*mount) {
 		}
 		item := t.todo[0]
 		t.todo = t.todo[1:]
+		t.inprogress.Add(1)
 		t.cond.L.Unlock()
 
-		logger := t.keepstore.logger.WithField("locator", item.Locator)
-
-		li, err := parseLocator(item.Locator)
-		if err != nil {
-			logger.Warn("ignoring trash request for invalid locator")
-			continue
-		}
-
-		reqMtime := time.Unix(0, item.BlockMtime)
-		if time.Since(reqMtime) < t.keepstore.cluster.Collections.BlobSigningTTL.Duration() {
-			logger.Warnf("client asked to delete a %v old block (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
-				arvados.Duration(time.Since(reqMtime)),
-				item.BlockMtime,
-				reqMtime,
-				t.keepstore.cluster.Collections.BlobSigningTTL)
-			continue
-		}
+		func() {
+			defer t.inprogress.Add(-1)
+			logger := t.keepstore.logger.WithField("locator", item.Locator)
 
-		var mnts []*mount
-		if item.MountUUID == "" {
-			mnts = mntsAllowTrash
-		} else if mnt := t.keepstore.mounts[item.MountUUID]; mnt == nil {
-			logger.Warnf("ignoring trash request for nonexistent mount %s", item.MountUUID)
-			continue
-		} else if !mnt.AllowTrash {
-			logger.Warnf("ignoring trash request for readonly mount %s with AllowTrashWhenReadOnly==false", item.MountUUID)
-			continue
-		} else {
-			mnts = []*mount{mnt}
-		}
-
-		for _, mnt := range mnts {
-			logger := logger.WithField("mount", mnt.UUID)
-			mtime, err := mnt.Mtime(li.hash)
+			li, err := parseLocator(item.Locator)
 			if err != nil {
-				logger.WithError(err).Error("error getting stored mtime")
-				continue
+				logger.Warn("ignoring trash request for invalid locator")
+				return
 			}
-			if !mtime.Equal(reqMtime) {
-				logger.Infof("stored mtime (%v) does not match trash list mtime (%v); skipping", mtime, reqMtime)
-				continue
+
+			reqMtime := time.Unix(0, item.BlockMtime)
+			if time.Since(reqMtime) < t.keepstore.cluster.Collections.BlobSigningTTL.Duration() {
+				logger.Warnf("client asked to delete a %v old block (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
+					arvados.Duration(time.Since(reqMtime)),
+					item.BlockMtime,
+					reqMtime,
+					t.keepstore.cluster.Collections.BlobSigningTTL)
+				return
 			}
-			err = mnt.BlockTrash(li.hash)
-			if err != nil {
-				logger.WithError(err).Info("error trashing block")
-				continue
+
+			var mnts []*mount
+			if item.MountUUID == "" {
+				mnts = mntsAllowTrash
+			} else if mnt := t.keepstore.mounts[item.MountUUID]; mnt == nil {
+				logger.Warnf("ignoring trash request for nonexistent mount %s", item.MountUUID)
+				return
+			} else if !mnt.AllowTrash {
+				logger.Warnf("ignoring trash request for readonly mount %s with AllowTrashWhenReadOnly==false", item.MountUUID)
+				return
+			} else {
+				mnts = []*mount{mnt}
 			}
-			logger.Info("block trashed")
-		}
+
+			for _, mnt := range mnts {
+				logger := logger.WithField("mount", mnt.UUID)
+				mtime, err := mnt.Mtime(li.hash)
+				if err != nil {
+					logger.WithError(err).Error("error getting stored mtime")
+					continue
+				}
+				if !mtime.Equal(reqMtime) {
+					logger.Infof("stored mtime (%v) does not match trash list mtime (%v); skipping", mtime, reqMtime)
+					continue
+				}
+				err = mnt.BlockTrash(li.hash)
+				if err != nil {
+					logger.WithError(err).Info("error trashing block")
+					continue
+				}
+				logger.Info("block trashed")
+			}
+		}()
 	}
 }
 
diff --git a/services/keepstore/trash_worker_test.go b/services/keepstore/trash_worker_test.go
index bed20bf538..0c304dbade 100644
--- a/services/keepstore/trash_worker_test.go
+++ b/services/keepstore/trash_worker_test.go
@@ -177,29 +177,25 @@ func (s *routerSuite) TestTrashList_Execute(c *C) {
 			checks = append(checks, func() {
 				ent := vols[i].data[hash]
 				dataPresent := ent.data != nil && ent.trash.IsZero()
-				c.Check(dataPresent, Equals, expect, Commentf("%s mount %d expect %v got len(ent.data)=%d ent.trash=%v // %s", hash, i, expect, len(ent.data), ent.trash, trial.comment))
+				c.Check(dataPresent, Equals, expect, Commentf("%s mount %d (%s) expect present=%v but got len(ent.data)=%d ent.trash=%v // %s\nlog:\n%s", hash, i, vols[i].params.UUID, expect, len(ent.data), !ent.trash.IsZero(), trial.comment, vols[i].stubLog.String()))
 			})
 		}
 	}
 
-	router.trasher.notifyTodoLen = make(chan int)
-
 	listjson, err := json.Marshal(trashList)
 	resp = call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, listjson, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
 
-	for lenwas := -1; lenwas != 0; {
-		select {
-		case lenis := <-router.trasher.notifyTodoLen:
-			if lenis > 0 || lenwas > 0 {
-				lenwas = lenis
-			}
-		case <-time.After(time.Second):
-			c.Fatal("timed out")
+	for {
+		router.trasher.cond.L.Lock()
+		todolen := len(router.trasher.todo)
+		router.trasher.cond.L.Unlock()
+		if todolen == 0 && router.trasher.inprogress.Load() == 0 {
+			break
 		}
+		time.Sleep(time.Millisecond)
 	}
 
-	c.Logf("doing checks")
 	for _, check := range checks {
 		check()
 	}

commit f6a02b68697a41e95ea3a5d69a2a5b46351ac0ec
Author: Tom Clegg <tom at curii.com>
Date:   Thu Feb 8 15:06:27 2024 -0500

    2960: Remove unused code.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/work_queue.go b/services/keepstore/work_queue.go
deleted file mode 100644
index be3d118ff0..0000000000
--- a/services/keepstore/work_queue.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-/* A WorkQueue is an asynchronous thread-safe queue manager.  It
-   provides a channel from which items can be read off the queue, and
-   permits replacing the contents of the queue at any time.
-
-   The overall work flow for a WorkQueue is as follows:
-
-     1. A WorkQueue is created with NewWorkQueue().  This
-        function instantiates a new WorkQueue and starts a manager
-        goroutine.  The manager listens on an input channel
-        (manager.newlist) and an output channel (manager.NextItem).
-
-     2. The manager first waits for a new list of requests on the
-        newlist channel.  When another goroutine calls
-        manager.ReplaceQueue(lst), it sends lst over the newlist
-        channel to the manager.  The manager goroutine now has
-        ownership of the list.
-
-     3. Once the manager has this initial list, it listens on both the
-        input and output channels for one of the following to happen:
-
-          a. A worker attempts to read an item from the NextItem
-             channel.  The manager sends the next item from the list
-             over this channel to the worker, and loops.
-
-          b. New data is sent to the manager on the newlist channel.
-             This happens when another goroutine calls
-             manager.ReplaceItem() with a new list.  The manager
-             discards the current list, replaces it with the new one,
-             and begins looping again.
-
-          c. The input channel is closed.  The manager closes its
-             output channel (signalling any workers to quit) and
-             terminates.
-
-   Tasks currently handled by WorkQueue:
-     * the pull list
-     * the trash list
-
-   Example usage:
-
-        // Any kind of user-defined type can be used with the
-        // WorkQueue.
-		type FrobRequest struct {
-			frob string
-		}
-
-		// Make a work list.
-		froblist := NewWorkQueue()
-
-		// Start a concurrent worker to read items from the NextItem
-		// channel until it is closed, deleting each one.
-		go func(list WorkQueue) {
-			for i := range list.NextItem {
-				req := i.(FrobRequest)
-				frob.Run(req)
-			}
-		}(froblist)
-
-		// Set up a HTTP handler for PUT /frob
-		router.HandleFunc(`/frob`,
-			func(w http.ResponseWriter, req *http.Request) {
-				// Parse the request body into a list.List
-				// of FrobRequests, and give this list to the
-				// frob manager.
-				newfrobs := parseBody(req.Body)
-				froblist.ReplaceQueue(newfrobs)
-			}).Methods("PUT")
-
-   Methods available on a WorkQueue:
-
-		ReplaceQueue(list)
-			Replaces the current item list with a new one.  The list
-            manager discards any unprocessed items on the existing
-            list and replaces it with the new one. If the worker is
-            processing a list item when ReplaceQueue is called, it
-            finishes processing before receiving items from the new
-            list.
-		Close()
-			Shuts down the manager goroutine. When Close is called,
-			the manager closes the NextItem channel.
-*/
-
-import "container/list"
-
-// WorkQueue definition
-type WorkQueue struct {
-	getStatus chan WorkQueueStatus
-	newlist   chan *list.List
-	// Workers get work items by reading from this channel.
-	NextItem <-chan interface{}
-	// Each worker must send struct{}{} to DoneItem exactly once
-	// for each work item received from NextItem, when it stops
-	// working on that item (regardless of whether the work was
-	// successful).
-	DoneItem chan<- struct{}
-}
-
-// WorkQueueStatus reflects the queue status.
-type WorkQueueStatus struct {
-	InProgress int
-	Queued     int
-}
-
-// NewWorkQueue returns a new empty WorkQueue.
-func NewWorkQueue() *WorkQueue {
-	nextItem := make(chan interface{})
-	reportDone := make(chan struct{})
-	newList := make(chan *list.List)
-	b := WorkQueue{
-		getStatus: make(chan WorkQueueStatus),
-		newlist:   newList,
-		NextItem:  nextItem,
-		DoneItem:  reportDone,
-	}
-	go func() {
-		// Read new work lists from the newlist channel.
-		// Reply to "status" and "get next item" queries by
-		// sending to the getStatus and nextItem channels
-		// respectively. Return when the newlist channel
-		// closes.
-
-		todo := &list.List{}
-		status := WorkQueueStatus{}
-
-		// When we're done, close the output channel; workers will
-		// shut down next time they ask for new work.
-		defer close(nextItem)
-		defer close(b.getStatus)
-
-		// nextChan and nextVal are both nil when we have
-		// nothing to send; otherwise they are, respectively,
-		// the nextItem channel and the next work item to send
-		// to it.
-		var nextChan chan interface{}
-		var nextVal interface{}
-
-		for newList != nil || status.InProgress > 0 {
-			select {
-			case p, ok := <-newList:
-				if !ok {
-					// Closed, stop receiving
-					newList = nil
-				}
-				todo = p
-				if todo == nil {
-					todo = &list.List{}
-				}
-				status.Queued = todo.Len()
-				if status.Queued == 0 {
-					// Stop sending work
-					nextChan = nil
-					nextVal = nil
-				} else {
-					nextChan = nextItem
-					nextVal = todo.Front().Value
-				}
-			case nextChan <- nextVal:
-				todo.Remove(todo.Front())
-				status.InProgress++
-				status.Queued--
-				if status.Queued == 0 {
-					// Stop sending work
-					nextChan = nil
-					nextVal = nil
-				} else {
-					nextVal = todo.Front().Value
-				}
-			case <-reportDone:
-				status.InProgress--
-			case b.getStatus <- status:
-			}
-		}
-	}()
-	return &b
-}
-
-// ReplaceQueue abandons any work items left in the existing queue,
-// and starts giving workers items from the given list. After giving
-// it to ReplaceQueue, the caller must not read or write the given
-// list.
-func (b *WorkQueue) ReplaceQueue(list *list.List) {
-	b.newlist <- list
-}
-
-// Close shuts down the manager and terminates the goroutine, which
-// abandons any pending requests, but allows any pull request already
-// in progress to continue.
-//
-// After Close, Status will return correct values, NextItem will be
-// closed, and ReplaceQueue will panic.
-func (b *WorkQueue) Close() {
-	close(b.newlist)
-}
-
-// Status returns an up-to-date WorkQueueStatus reflecting the current
-// queue status.
-func (b *WorkQueue) Status() WorkQueueStatus {
-	// If the channel is closed, we get the nil value of
-	// WorkQueueStatus, which is an accurate description of a
-	// finished queue.
-	return <-b.getStatus
-}
diff --git a/services/keepstore/work_queue_test.go b/services/keepstore/work_queue_test.go
deleted file mode 100644
index 254f96cb2d..0000000000
--- a/services/keepstore/work_queue_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-	"container/list"
-	"runtime"
-	"testing"
-	"time"
-)
-
-type fatalfer interface {
-	Fatalf(string, ...interface{})
-}
-
-func makeTestWorkList(ary []interface{}) *list.List {
-	l := list.New()
-	for _, n := range ary {
-		l.PushBack(n)
-	}
-	return l
-}
-
-func expectChannelEmpty(t fatalfer, c <-chan interface{}) {
-	select {
-	case item, ok := <-c:
-		if ok {
-			t.Fatalf("Received value (%+v) from channel that we expected to be empty", item)
-		}
-	default:
-	}
-}
-
-func expectChannelNotEmpty(t fatalfer, c <-chan interface{}) interface{} {
-	select {
-	case item, ok := <-c:
-		if !ok {
-			t.Fatalf("expected data on a closed channel")
-		}
-		return item
-	case <-time.After(time.Second):
-		t.Fatalf("expected data on an empty channel")
-		return nil
-	}
-}
-
-func expectChannelClosedWithin(t fatalfer, timeout time.Duration, c <-chan interface{}) {
-	select {
-	case received, ok := <-c:
-		if ok {
-			t.Fatalf("Expected channel to be closed, but received %+v instead", received)
-		}
-	case <-time.After(timeout):
-		t.Fatalf("Expected channel to be closed, but it is still open after %v", timeout)
-	}
-}
-
-func doWorkItems(t fatalfer, q *WorkQueue, expected []interface{}) {
-	for i := range expected {
-		actual, ok := <-q.NextItem
-		if !ok {
-			t.Fatalf("Expected %+v but channel was closed after receiving %+v as expected.", expected, expected[:i])
-		}
-		q.DoneItem <- struct{}{}
-		if actual.(int) != expected[i] {
-			t.Fatalf("Expected %+v but received %+v after receiving %+v as expected.", expected[i], actual, expected[:i])
-		}
-	}
-}
-
-func expectEqualWithin(t fatalfer, timeout time.Duration, expect interface{}, f func() interface{}) {
-	ok := make(chan struct{})
-	giveup := false
-	go func() {
-		for f() != expect && !giveup {
-			time.Sleep(time.Millisecond)
-		}
-		close(ok)
-	}()
-	select {
-	case <-ok:
-	case <-time.After(timeout):
-		giveup = true
-		_, file, line, _ := runtime.Caller(1)
-		t.Fatalf("Still getting %+v, timed out waiting for %+v\n%s:%d", f(), expect, file, line)
-	}
-}
-
-func expectQueued(t fatalfer, b *WorkQueue, expectQueued int) {
-	if l := b.Status().Queued; l != expectQueued {
-		t.Fatalf("Got Queued==%d, expected %d", l, expectQueued)
-	}
-}
-
-func TestWorkQueueDoneness(t *testing.T) {
-	b := NewWorkQueue()
-	defer b.Close()
-	b.ReplaceQueue(makeTestWorkList([]interface{}{1, 2, 3}))
-	expectQueued(t, b, 3)
-	gate := make(chan struct{})
-	go func() {
-		<-gate
-		for range b.NextItem {
-			<-gate
-			time.Sleep(time.Millisecond)
-			b.DoneItem <- struct{}{}
-		}
-	}()
-	expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
-	b.ReplaceQueue(makeTestWorkList([]interface{}{4, 5, 6}))
-	for i := 1; i <= 3; i++ {
-		gate <- struct{}{}
-		expectEqualWithin(t, time.Second, 3-i, func() interface{} { return b.Status().Queued })
-		expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
-	}
-	close(gate)
-	expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
-	expectChannelEmpty(t, b.NextItem)
-}
-
-// Create a WorkQueue, generate a list for it, and instantiate a worker.
-func TestWorkQueueReadWrite(t *testing.T) {
-	var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-
-	b := NewWorkQueue()
-	expectQueued(t, b, 0)
-
-	b.ReplaceQueue(makeTestWorkList(input))
-	expectQueued(t, b, len(input))
-
-	doWorkItems(t, b, input)
-	expectChannelEmpty(t, b.NextItem)
-	b.Close()
-}
-
-// Start a worker before the list has any input.
-func TestWorkQueueEarlyRead(t *testing.T) {
-	var input = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-
-	b := NewWorkQueue()
-	defer b.Close()
-
-	// First, demonstrate that nothing is available on the NextItem
-	// channel.
-	expectChannelEmpty(t, b.NextItem)
-
-	// Start a reader in a goroutine. The reader will block until the
-	// block work list has been initialized.
-	//
-	done := make(chan int)
-	go func() {
-		doWorkItems(t, b, input)
-		done <- 1
-	}()
-
-	// Feed the blocklist a new worklist, and wait for the worker to
-	// finish.
-	b.ReplaceQueue(makeTestWorkList(input))
-	<-done
-	expectQueued(t, b, 0)
-}
-
-// After Close(), NextItem closes, work finishes, then stats return zero.
-func TestWorkQueueClose(t *testing.T) {
-	b := NewWorkQueue()
-	input := []interface{}{1, 2, 3, 4, 5, 6, 7, 8}
-	mark := make(chan struct{})
-	go func() {
-		<-b.NextItem
-		mark <- struct{}{}
-		<-mark
-		b.DoneItem <- struct{}{}
-	}()
-	b.ReplaceQueue(makeTestWorkList(input))
-	// Wait for worker to take item 1
-	<-mark
-	b.Close()
-	expectEqualWithin(t, time.Second, 1, func() interface{} { return b.Status().InProgress })
-	// Tell worker to report done
-	mark <- struct{}{}
-	expectEqualWithin(t, time.Second, 0, func() interface{} { return b.Status().InProgress })
-	expectChannelClosedWithin(t, time.Second, b.NextItem)
-}
-
-// Show that a reader may block when the manager's list is exhausted,
-// and that the reader resumes automatically when new data is
-// available.
-func TestWorkQueueReaderBlocks(t *testing.T) {
-	var (
-		inputBeforeBlock = []interface{}{1, 2, 3, 4, 5}
-		inputAfterBlock  = []interface{}{6, 7, 8, 9, 10}
-	)
-
-	b := NewWorkQueue()
-	defer b.Close()
-	sendmore := make(chan int)
-	done := make(chan int)
-	go func() {
-		doWorkItems(t, b, inputBeforeBlock)
-
-		// Confirm that the channel is empty, so a subsequent read
-		// on it will block.
-		expectChannelEmpty(t, b.NextItem)
-
-		// Signal that we're ready for more input.
-		sendmore <- 1
-		doWorkItems(t, b, inputAfterBlock)
-		done <- 1
-	}()
-
-	// Write a slice of the first five elements and wait for the
-	// reader to signal that it's ready for us to send more input.
-	b.ReplaceQueue(makeTestWorkList(inputBeforeBlock))
-	<-sendmore
-
-	b.ReplaceQueue(makeTestWorkList(inputAfterBlock))
-
-	// Wait for the reader to complete.
-	<-done
-}
-
-// Replace one active work list with another.
-func TestWorkQueueReplaceQueue(t *testing.T) {
-	var firstInput = []interface{}{1, 1, 2, 3, 5, 8, 13, 21, 34}
-	var replaceInput = []interface{}{1, 4, 9, 16, 25, 36, 49, 64, 81}
-
-	b := NewWorkQueue()
-	b.ReplaceQueue(makeTestWorkList(firstInput))
-
-	// Read just the first five elements from the work list.
-	// Confirm that the channel is not empty.
-	doWorkItems(t, b, firstInput[0:5])
-	expectChannelNotEmpty(t, b.NextItem)
-
-	// Replace the work list and read five more elements.
-	// The old list should have been discarded and all new
-	// elements come from the new list.
-	b.ReplaceQueue(makeTestWorkList(replaceInput))
-	doWorkItems(t, b, replaceInput[0:5])
-
-	b.Close()
-}

commit 99ad1527cffd776ef7f9a3d46cf3ed6b2188d010
Author: Tom Clegg <tom at curii.com>
Date:   Thu Feb 8 14:54:19 2024 -0500

    2960: Update tests, continued.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 9555864cc6..9471936bc2 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -521,13 +521,17 @@ func (ce *checkEqual) Write(p []byte) (int, error) {
 	return len(p), nil
 }
 
-func (ks *keepstore) BlockUntrash(ctx context.Context, hash string) error {
+func (ks *keepstore) BlockUntrash(ctx context.Context, locator string) error {
+	li, err := parseLocator(locator)
+	if err != nil {
+		return err
+	}
 	var errToCaller error = os.ErrNotExist
 	for _, mnt := range ks.mountsW {
 		if ctx.Err() != nil {
 			return ctx.Err()
 		}
-		err := mnt.BlockUntrash(hash)
+		err := mnt.BlockUntrash(li.hash)
 		if err == nil {
 			errToCaller = nil
 		} else if !os.IsNotExist(err) && errToCaller != nil {
@@ -537,13 +541,17 @@ func (ks *keepstore) BlockUntrash(ctx context.Context, hash string) error {
 	return errToCaller
 }
 
-func (ks *keepstore) BlockTouch(ctx context.Context, hash string) error {
+func (ks *keepstore) BlockTouch(ctx context.Context, locator string) error {
+	li, err := parseLocator(locator)
+	if err != nil {
+		return err
+	}
 	var errToCaller error = os.ErrNotExist
 	for _, mnt := range ks.mountsW {
 		if ctx.Err() != nil {
 			return ctx.Err()
 		}
-		err := mnt.BlockTouch(hash)
+		err := mnt.BlockTouch(li.hash)
 		if err == nil {
 			errToCaller = nil
 		} else if !os.IsNotExist(err) && errToCaller != nil {
@@ -553,7 +561,14 @@ func (ks *keepstore) BlockTouch(ctx context.Context, hash string) error {
 	return errToCaller
 }
 
-func (ks *keepstore) BlockTrash(ctx context.Context, hash string) error {
+func (ks *keepstore) BlockTrash(ctx context.Context, locator string) error {
+	if !ks.cluster.Collections.BlobTrash {
+		return errMethodNotAllowed
+	}
+	li, err := parseLocator(locator)
+	if err != nil {
+		return err
+	}
 	var errToCaller error = os.ErrNotExist
 	for _, mnt := range ks.mounts {
 		if !mnt.AllowTrash {
@@ -562,8 +577,11 @@ func (ks *keepstore) BlockTrash(ctx context.Context, hash string) error {
 		if ctx.Err() != nil {
 			return ctx.Err()
 		}
-		err := mnt.BlockTrash(hash)
-		if !os.IsNotExist(err) {
+		t, err := mnt.Mtime(li.hash)
+		if err == nil && time.Now().Sub(t) > ks.cluster.Collections.BlobSigningTTL.Duration() {
+			err = mnt.BlockTrash(li.hash)
+		}
+		if os.IsNotExist(errToCaller) || (errToCaller == nil && !os.IsNotExist(err)) {
 			errToCaller = err
 		}
 	}
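
The rewritten BlockTrash consults each mount's Mtime before trashing, so a replica newer than BlobSigningTTL survives even when a trash request names it. A standalone sketch of that per-mount eligibility check (trashIfOldEnough and its parameters are illustrative names, not keepstore's API):

    package main

    import (
        "fmt"
        "time"
    )

    // trashIfOldEnough trashes a block only when its stored mtime is older
    // than ttl, mirroring the Mtime check BlockTrash now performs per mount.
    // mtime and trash stand in for a volume's Mtime/BlockTrash methods.
    func trashIfOldEnough(hash string, ttl time.Duration,
        mtime func(string) (time.Time, error), trash func(string) error) error {
        t, err := mtime(hash)
        if err != nil {
            return err // e.g. os.ErrNotExist if this mount has no replica
        }
        if time.Since(t) <= ttl {
            return nil // still within the signing TTL: keep it
        }
        return trash(hash)
    }

    func main() {
        stored := time.Now().Add(-48 * time.Hour)
        err := trashIfOldEnough("acbd18db4cc2f85cedef654fccc4a4d8", 24*time.Hour,
            func(string) (time.Time, error) { return stored, nil },
            func(h string) error { fmt.Println("trashed", h); return nil })
        fmt.Println("err:", err) // err: <nil>
    }
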
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
index 686c413958..2d66014a62 100644
--- a/services/keepstore/keepstore_test.go
+++ b/services/keepstore/keepstore_test.go
@@ -13,6 +13,7 @@ import (
 	"io"
 	"net/http"
 	"os"
+	"sort"
 	"strings"
 	"sync"
 	"time"
@@ -334,14 +335,119 @@ func (s *keepstoreSuite) TestIndex(c *C) {
 }
 
 func (s *keepstoreSuite) TestBlockTrash(c *C) {
-	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
-	c.Fatal("todo: trash block")
-	c.Fatal("todo: trash nonexistent block => 404")
-}
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true},
+		"zzzzz-nyw5e-333333333333333": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+	}
+	ks, cancel := testKeepstore(c, s.cluster, nil)
+	defer cancel()
 
-func (s *keepstoreSuite) TestPutNeedsOnlyOneBuffer(c *C) {
-	c.Fatal("todo")
-	ok := make(chan bool)
+	var vol []*stubVolume
+	for _, mount := range ks.mountsR {
+		vol = append(vol, mount.volume.(*stubVolume))
+	}
+	sort.Slice(vol, func(i, j int) bool {
+		return vol[i].params.UUID < vol[j].params.UUID
+	})
+
+	ctx := context.Background()
+	loc := fooHash + "+3"
+	tOld := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Second)
+
+	clear := func() {
+		for _, vol := range vol {
+			err := vol.BlockTrash(fooHash)
+			if !os.IsNotExist(err) {
+				c.Assert(err, IsNil)
+			}
+		}
+	}
+	writeit := func(volidx int) {
+		err := vol[volidx].BlockWrite(ctx, fooHash, []byte("foo"))
+		c.Assert(err, IsNil)
+		err = vol[volidx].blockTouchWithTime(fooHash, tOld)
+		c.Assert(err, IsNil)
+	}
+	trashit := func() error {
+		return ks.BlockTrash(ctx, loc)
+	}
+	checkexists := func(volidx int) bool {
+		_, err := vol[volidx].BlockRead(ctx, fooHash, io.Discard)
+		if !os.IsNotExist(err) {
+			c.Check(err, IsNil)
+		}
+		return err == nil
+	}
+
+	clear()
+	c.Check(trashit(), Equals, os.ErrNotExist)
+
+	// one old replica => trash it
+	clear()
+	writeit(0)
+	c.Check(trashit(), IsNil)
+	c.Check(checkexists(0), Equals, false)
+
+	// one old replica + one new replica => keep new, trash old
+	clear()
+	writeit(0)
+	writeit(1)
+	c.Check(vol[1].blockTouchWithTime(fooHash, time.Now()), IsNil)
+	c.Check(trashit(), IsNil)
+	c.Check(checkexists(0), Equals, false)
+	c.Check(checkexists(1), Equals, true)
+
+	// two old replicas => trash both
+	clear()
+	writeit(0)
+	writeit(1)
+	c.Check(trashit(), IsNil)
+	c.Check(checkexists(0), Equals, false)
+	c.Check(checkexists(1), Equals, false)
+
+	// four old replicas => trash all except readonly volume with
+	// AllowTrashWhenReadOnly==false
+	clear()
+	writeit(0)
+	writeit(1)
+	writeit(2)
+	writeit(3)
+	c.Check(trashit(), IsNil)
+	c.Check(checkexists(0), Equals, false)
+	c.Check(checkexists(1), Equals, false)
+	c.Check(checkexists(2), Equals, true)
+	c.Check(checkexists(3), Equals, false)
+
+	// two old replicas but one returns an error => return the
+	// only non-404 backend error
+	clear()
+	vol[0].blockTrash = func(hash string) error {
+		return errors.New("fake error")
+	}
+	writeit(0)
+	writeit(3)
+	c.Check(trashit(), ErrorMatches, "fake error")
+	c.Check(checkexists(0), Equals, true)
+	c.Check(checkexists(1), Equals, false)
+	c.Check(checkexists(2), Equals, false)
+	c.Check(checkexists(3), Equals, false)
+}
+
+func (s *keepstoreSuite) TestBlockWrite_OnlyOneBuffer(c *C) {
+	s.cluster.API.MaxKeepBlobBuffers = 1
+	ks, cancel := testKeepstore(c, s.cluster, nil)
+	defer cancel()
+	ok := make(chan struct{})
+	go func() {
+		defer close(ok)
+		ctx := authContext(arvadostest.ActiveTokenV2)
+		_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+			Hash: fooHash,
+			Data: []byte("foo")})
+		c.Check(err, IsNil)
+	}()
 	select {
 	case <-ok:
 	case <-time.After(time.Second):
@@ -350,11 +456,40 @@ func (s *keepstoreSuite) TestPutNeedsOnlyOneBuffer(c *C) {
 }
 
 func (s *keepstoreSuite) TestBufferPoolLeak(c *C) {
-	c.Fatal("todo")
+	s.cluster.API.MaxKeepBlobBuffers = 4
+	ks, cancel := testKeepstore(c, s.cluster, nil)
+	defer cancel()
+
+	ctx := authContext(arvadostest.ActiveTokenV2)
+	var wg sync.WaitGroup
+	for range make([]int, 20) {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+				Hash: fooHash,
+				Data: []byte("foo")})
+			c.Check(err, IsNil)
+			_, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+				Locator: resp.Locator,
+				WriteTo: io.Discard})
+			c.Check(err, IsNil)
+		}()
+	}
+	ok := make(chan struct{})
+	go func() {
+		wg.Wait()
+		close(ok)
+	}()
+	select {
+	case <-ok:
+	case <-time.After(time.Second):
+		c.Fatal("read/write sequence deadlocks, likely buffer pool leak")
+	}
 }
 
 func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
-	c.Fatal("todo: volume with no specified classes implies 'default'")
+	c.Error("todo: volume with no specified classes implies 'default'")
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"}, // "default" is implicit
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"special": true, "extra": true}},
@@ -421,8 +556,8 @@ func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
 }
 
 func (s *keepstoreSuite) TestUntrashHandler(c *C) {
-	c.Fatal("todo")
 	c.Check("resp", Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
+	c.Error("todo")
 }
 
 func (s *keepstoreSuite) TestUntrashHandlerWithNoWritableVolumes(c *C) {
@@ -453,10 +588,6 @@ func (s *keepstoreSuite) TestBlockWrite_SkipReadonly(c *C) {
 	c.Fatal("todo")
 }
 
-func (s *keepstoreSuite) TestBlockTrash_SkipReadonly(c *C) {
-	c.Fatal("todo")
-}
-
 func (s *keepstoreSuite) TestBlockRead_VolumeError503(c *C) {
 	c.Fatal("todo: return 503 ")
 }
diff --git a/services/keepstore/pull_worker.go b/services/keepstore/pull_worker.go
index 760ba90b46..18379dc0cb 100644
--- a/services/keepstore/pull_worker.go
+++ b/services/keepstore/pull_worker.go
@@ -25,6 +25,10 @@ type puller struct {
 	keepstore *keepstore
 	todo      []PullListItem
 	cond      *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+
+	// For the benefit of test cases: if this channel is non-nil,
+	// send len(todo) to it after processing each pull list item.
+	notifyTodoLen chan int
 }
 
 func newPuller(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *puller {
@@ -90,8 +94,13 @@ func (p *puller) runWorker(ctx context.Context) {
 	}()
 	for {
 		p.cond.L.Lock()
-		for len(p.todo) == 0 && ctx.Err() == nil {
-			p.cond.Wait()
+		if len(p.todo) == 0 {
+			for len(p.todo) == 0 && ctx.Err() == nil {
+				p.cond.Wait()
+			}
+			if p.notifyTodoLen != nil {
+				p.notifyTodoLen <- len(p.todo)
+			}
 		}
 		if ctx.Err() != nil {
 			return
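
The new notifyTodoLen channel is a test-only hook: when a test wires it up, the worker reports the remaining pull-list length (per the struct comment, after each item is processed), so tests can wait for the list to drain instead of sleeping. A hedged sketch of how a test might consume such a hook (waitForDrain is illustrative, not part of the package):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // waitForDrain reads queue-length notifications, as sent on a channel like
    // puller.notifyTodoLen, until a zero length is reported or the timeout expires.
    func waitForDrain(notify <-chan int, timeout time.Duration) error {
        deadline := time.After(timeout)
        for {
            select {
            case n := <-notify:
                if n == 0 {
                    return nil
                }
            case <-deadline:
                return errors.New("pull list not drained before timeout")
            }
        }
    }

    func main() {
        notify := make(chan int, 3)
        notify <- 2
        notify <- 1
        notify <- 0
        fmt.Println(waitForDrain(notify, time.Second)) // <nil>
    }
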
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
index 1cad9af251..8be9b8a113 100644
--- a/services/keepstore/router_test.go
+++ b/services/keepstore/router_test.go
@@ -13,6 +13,7 @@ import (
 	"io"
 	"net/http"
 	"net/http/httptest"
+	"os"
 	"sort"
 	"strings"
 	"time"
@@ -123,8 +124,6 @@ func (s *routerSuite) TestBlockWrite_Headers(c *C) {
 	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
-	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
-
 	resp := call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Arvados-Replicas-Desired": []string{"2"}})
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
@@ -147,6 +146,40 @@ func sortCommaSeparated(s string) string {
 	return strings.Join(slice, ", ")
 }
 
+func (s *routerSuite) TestBlockTrash(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+	err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	err = vol0.blockTouchWithTime(fooHash, time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration()))
+	c.Assert(err, IsNil)
+	resp := call(router, "DELETE", "http://example/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(vol0.stubLog.String(), Matches, `(?ms).* trash .*`)
+	_, err = vol0.BlockRead(context.Background(), fooHash, io.Discard)
+	c.Assert(err, Equals, os.ErrNotExist)
+}
+
+func (s *routerSuite) TestBlockUntrash(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	vol0 := router.keepstore.mountsW[0].volume.(*stubVolume)
+	err := vol0.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	err = vol0.BlockTrash(fooHash)
+	c.Assert(err, IsNil)
+	_, err = vol0.BlockRead(context.Background(), fooHash, io.Discard)
+	c.Assert(err, Equals, os.ErrNotExist)
+	resp := call(router, "PUT", "http://example/untrash/"+fooHash+"+3", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(vol0.stubLog.String(), Matches, `(?ms).* untrash .*`)
+	_, err = vol0.BlockRead(context.Background(), fooHash, io.Discard)
+	c.Check(err, IsNil)
+}
+
 func (s *routerSuite) TestBadRequest(c *C) {
 	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
diff --git a/services/keepstore/trash_worker_test.go b/services/keepstore/trash_worker_test.go
index a0917eb0d2..bed20bf538 100644
--- a/services/keepstore/trash_worker_test.go
+++ b/services/keepstore/trash_worker_test.go
@@ -68,9 +68,6 @@ func (s *routerSuite) TestTrashList_Execute(c *C) {
 	sort.Slice(mounts, func(i, j int) bool {
 		return mounts[i].UUID < mounts[j].UUID
 	})
-	c.Check(mounts[0].UUID, Equals, "zzzzz-nyw5e-000000000000000")
-	c.Check(mounts[1].UUID, Equals, "zzzzz-nyw5e-111111111111111")
-	c.Check(mounts[2].UUID, Equals, "zzzzz-nyw5e-222222222222222")
 
 	// Make vols (stub volumes) in same order as mounts
 	var vols []*stubVolume
@@ -78,13 +75,16 @@ func (s *routerSuite) TestTrashList_Execute(c *C) {
 		vols = append(vols, router.keepstore.mounts[mount.UUID].volume.(*stubVolume))
 	}
 
+	// The "trial" loop below will construct the trashList which
+	// we'll send to trasher via router, plus a slice of checks
+	// which we'll run after the trasher has finished executing
+	// the list.
 	var trashList []TrashListItem
+	var checks []func()
 
 	tNew := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() / 2)
 	tOld := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Second)
 
-	var checks []func()
-
 	for _, trial := range []struct {
 		comment        string
 		storeMtime     []time.Time

commit f1c1f626326db5387c75b59708461da453054de9
Author: Tom Clegg <tom at curii.com>
Date:   Thu Feb 8 10:34:03 2024 -0500

    2960: Update tests, continued.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keep-balance/change_set.go b/services/keep-balance/change_set.go
index c3579556bb..bb5a1dbf5e 100644
--- a/services/keep-balance/change_set.go
+++ b/services/keep-balance/change_set.go
@@ -10,6 +10,7 @@ import (
 	"sync"
 
 	"git.arvados.org/arvados.git/sdk/go/arvados"
+	"git.arvados.org/arvados.git/services/keepstore"
 )
 
 // Pull is a request to retrieve a block from a remote server, and
@@ -23,12 +24,7 @@ type Pull struct {
 // MarshalJSON formats a pull request the way keepstore wants to see
 // it.
 func (p Pull) MarshalJSON() ([]byte, error) {
-	type KeepstorePullRequest struct {
-		Locator   string   `json:"locator"`
-		Servers   []string `json:"servers"`
-		MountUUID string   `json:"mount_uuid"`
-	}
-	return json.Marshal(KeepstorePullRequest{
+	return json.Marshal(keepstore.PullListItem{
 		Locator:   string(p.SizedDigest[:32]),
 		Servers:   []string{p.From.URLBase()},
 		MountUUID: p.To.KeepMount.UUID,
@@ -45,12 +41,7 @@ type Trash struct {
 // MarshalJSON formats a trash request the way keepstore wants to see
 // it, i.e., as a bare locator with no +size hint.
 func (t Trash) MarshalJSON() ([]byte, error) {
-	type KeepstoreTrashRequest struct {
-		Locator    string `json:"locator"`
-		BlockMtime int64  `json:"block_mtime"`
-		MountUUID  string `json:"mount_uuid"`
-	}
-	return json.Marshal(KeepstoreTrashRequest{
+	return json.Marshal(keepstore.TrashListItem{
 		Locator:    string(t.SizedDigest[:32]),
 		BlockMtime: t.Mtime,
 		MountUUID:  t.From.KeepMount.UUID,
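
Marshaling through keepstore.PullListItem and keepstore.TrashListItem means keep-balance can no longer drift from the field names keepstore's pull/trash handlers decode. For reference, a pull-list entry serializes like this (the field set is copied from the diff; the values are made up):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Same shape as keepstore.PullListItem above.
    type PullListItem struct {
        Locator   string   `json:"locator"`
        Servers   []string `json:"servers"`
        MountUUID string   `json:"mount_uuid"`
    }

    func main() {
        b, _ := json.Marshal(PullListItem{
            Locator:   "acbd18db4cc2f85cedef654fccc4a4d8",
            Servers:   []string{"https://keep0.example:25107"},
            MountUUID: "zzzzz-nyw5e-000000000000000",
        })
        fmt.Println(string(b))
        // {"locator":"acbd18db4cc2f85cedef654fccc4a4d8","servers":["https://keep0.example:25107"],"mount_uuid":"zzzzz-nyw5e-000000000000000"}
    }
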
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index 5846095c8c..bdd669bb46 100644
--- a/services/keepstore/azure_blob_volume.go
+++ b/services/keepstore/azure_blob_volume.go
@@ -5,13 +5,11 @@
 package keepstore
 
 import (
-	"bytes"
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"regexp"
@@ -321,9 +319,6 @@ func (v *AzureBlobVolume) get(ctx context.Context, hash string, dst io.WriterAt)
 // BlockWrite stores a block on the volume. If it already exists, its
 // timestamp is updated.
 func (v *AzureBlobVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
 	// Send the block data through a pipe, so that (if we need to)
 	// we can close the pipe early and abandon our
 	// CreateBlockBlobFromReader() goroutine, without worrying about
 	// it accessing our data buffer after we release it.
 	// buffer after we release it.
 	bufr, bufw := io.Pipe()
 	go func() {
-		io.Copy(bufw, bytes.NewReader(data))
+		bufw.Write(data)
 		bufw.Close()
 	}()
 	errChan := make(chan error, 1)
@@ -351,13 +346,10 @@ func (v *AzureBlobVolume) BlockWrite(ctx context.Context, hash string, data []by
 	select {
 	case <-ctx.Done():
 		ctxlog.FromContext(ctx).Debugf("%s: taking CreateBlockBlobFromReader's input away: %s", v, ctx.Err())
-		// Our pipe might be stuck in Write(), waiting for
-		// io.Copy() to read. If so, un-stick it. This means
-		// CreateBlockBlobFromReader will get corrupt data,
-		// but that's OK: the size won't match, so the write
-		// will fail.
-		go io.Copy(ioutil.Discard, bufr)
-		// CloseWithError() will return once pending I/O is done.
+		// bufw.CloseWithError() interrupts bufw.Write() if
+		// necessary, ensuring CreateBlockBlobFromReader can't
+		// read any more of our data slice via bufr after we
+		// return.
 		bufw.CloseWithError(ctx.Err())
 		ctxlog.FromContext(ctx).Debugf("%s: abandoning CreateBlockBlobFromReader goroutine", v)
 		return ctx.Err()
@@ -368,9 +360,6 @@ func (v *AzureBlobVolume) BlockWrite(ctx context.Context, hash string, data []by
 
 // BlockTouch updates the last-modified property of a block blob.
 func (v *AzureBlobVolume) BlockTouch(hash string) error {
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
 	trashed, metadata, err := v.checkTrashed(hash)
 	if err != nil {
 		return err
@@ -460,9 +449,6 @@ func (v *AzureBlobVolume) listBlobs(page int, params storage.ListBlobsParameters
 
 // Trash a Keep block.
 func (v *AzureBlobVolume) BlockTrash(loc string) error {
-	if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
-		return errMethodNotAllowed
-	}
 	// Ideally we would use If-Unmodified-Since, but that
 	// particular condition seems to be ignored by Azure. Instead,
 	// we get the Etag before checking Mtime, and use If-Match to
@@ -674,7 +660,7 @@ func (c *azureContainer) GetBlob(bname string) (io.ReadCloser, error) {
 	b := c.ctr.GetBlobReference(bname)
 	rdr, err := b.Get(nil)
 	c.stats.TickErr(err)
-	return NewCountingReader(rdr, c.stats.TickInBytes), err
+	return newCountingReader(rdr, c.stats.TickInBytes), err
 }
 
 func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storage.GetBlobOptions) (io.ReadCloser, error) {
@@ -689,7 +675,7 @@ func (c *azureContainer) GetBlobRange(bname string, start, end int, opts *storag
 		GetBlobOptions: opts,
 	})
 	c.stats.TickErr(err)
-	return NewCountingReader(rdr, c.stats.TickInBytes), err
+	return newCountingReader(rdr, c.stats.TickInBytes), err
 }
 
 // If we give it an io.Reader that doesn't also have a Len() int
@@ -710,7 +696,7 @@ func (c *azureContainer) CreateBlockBlobFromReader(bname string, size int, rdr i
 	c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
 	if size != 0 {
 		rdr = &readerWithAzureLen{
-			Reader: NewCountingReader(rdr, c.stats.TickOutBytes),
+			Reader: newCountingReader(rdr, c.stats.TickOutBytes),
 			len:    size,
 		}
 	}
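
The simplified Azure BlockWrite leans on io.Pipe semantics: calling CloseWithError on the write end unblocks any in-progress Write and makes the reader's next Read fail, so an abandoned CreateBlockBlobFromReader goroutine can no longer read from the caller's data buffer. A standalone sketch of that cancellation pattern (the slow consumer stands in for the SDK upload call):

    package main

    import (
        "context"
        "fmt"
        "io"
        "time"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()

        data := make([]byte, 1<<20)
        pr, pw := io.Pipe()

        // Producer: feed the buffer through the pipe (blocks until read or closed).
        go func() {
            pw.Write(data)
            pw.Close()
        }()

        // Slow consumer, standing in for the upload call.
        done := make(chan error, 1)
        go func() {
            time.Sleep(time.Second)
            _, err := io.Copy(io.Discard, pr)
            done <- err
        }()

        select {
        case err := <-done:
            fmt.Println("upload finished:", err)
        case <-ctx.Done():
            // Interrupt the blocked Write; the consumer's next Read fails,
            // and our data slice is safe to reuse.
            pw.CloseWithError(ctx.Err())
            fmt.Println("abandoned upload:", ctx.Err())
        }
    }
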
diff --git a/services/keepstore/azure_blob_volume_test.go b/services/keepstore/azure_blob_volume_test.go
index ab1f84ec05..a543dfc245 100644
--- a/services/keepstore/azure_blob_volume_test.go
+++ b/services/keepstore/azure_blob_volume_test.go
@@ -366,14 +366,14 @@ func (d *azStubDialer) Dial(network, address string) (net.Conn, error) {
 	return d.Dialer.Dial(network, address)
 }
 
-type TestableAzureBlobVolume struct {
+type testableAzureBlobVolume struct {
 	*AzureBlobVolume
 	azHandler *azStubHandler
 	azStub    *httptest.Server
 	t         TB
 }
 
-func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, params newVolumeParams) *TestableAzureBlobVolume {
+func (s *stubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, params newVolumeParams) *testableAzureBlobVolume {
 	azHandler := newAzStubHandler(t.(*check.C))
 	azStub := httptest.NewServer(azHandler)
 
@@ -415,7 +415,7 @@ func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, params newVolum
 		t.Fatal(err)
 	}
 
-	return &TestableAzureBlobVolume{
+	return &testableAzureBlobVolume{
 		AzureBlobVolume: v,
 		azHandler:       azHandler,
 		azStub:          azStub,
@@ -423,32 +423,33 @@ func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, params newVolum
 	}
 }
 
-var _ = check.Suite(&StubbedAzureBlobSuite{})
+var _ = check.Suite(&stubbedAzureBlobSuite{})
 
-type StubbedAzureBlobSuite struct {
+type stubbedAzureBlobSuite struct {
 	origHTTPTransport http.RoundTripper
 }
 
-func (s *StubbedAzureBlobSuite) SetUpTest(c *check.C) {
+func (s *stubbedAzureBlobSuite) SetUpSuite(c *check.C) {
 	s.origHTTPTransport = http.DefaultTransport
 	http.DefaultTransport = &http.Transport{
 		Dial: (&azStubDialer{logger: ctxlog.TestLogger(c)}).Dial,
 	}
 }
 
-func (s *StubbedAzureBlobSuite) TearDownTest(c *check.C) {
+func (s *stubbedAzureBlobSuite) TearDownSuite(c *check.C) {
 	http.DefaultTransport = s.origHTTPTransport
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeWithGeneric(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeWithGeneric(c *check.C) {
 	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
 		return s.newTestableAzureBlobVolume(t, params)
 	})
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C) {
 	// Test (BlockSize mod azureMaxGetBytes)==0 and !=0 cases
-	for _, b := range []int{2 << 22, 2<<22 - 1} {
+	for _, b := range []int{2<<22 - 1, 2<<22 - 1} {
+		c.Logf("=== MaxGetBytes=%d", b)
 		DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
 			v := s.newTestableAzureBlobVolume(t, params)
 			v.MaxGetBytes = b
@@ -457,13 +458,13 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C)
 	}
 }
 
-func (s *StubbedAzureBlobSuite) TestReadonlyAzureBlobVolumeWithGeneric(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestReadonlyAzureBlobVolumeWithGeneric(c *check.C) {
 	DoGenericVolumeTests(c, false, func(c TB, params newVolumeParams) TestableVolume {
 		return s.newTestableAzureBlobVolume(c, params)
 	})
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
 	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
 		Cluster:      testCluster(c),
 		ConfigVolume: arvados.Volume{Replication: 3},
@@ -504,7 +505,7 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
 	}
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
 	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
 		Cluster:      testCluster(c),
 		ConfigVolume: arvados.Volume{Replication: 3},
@@ -545,7 +546,7 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
 	wg.Wait()
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *check.C) {
 	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
 		Cluster:      testCluster(c),
 		ConfigVolume: arvados.Volume{Replication: 3},
@@ -592,21 +593,21 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *che
 	}
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockRead(c *check.C) {
-	s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockRead(c *check.C) {
+	s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *testableAzureBlobVolume) error {
 		v.BlockWriteRaw(TestHash, TestBlock)
 		_, err := v.BlockRead(ctx, TestHash, io.Discard)
 		return err
 	})
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockWrite(c *check.C) {
-	s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
+func (s *stubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockWrite(c *check.C) {
+	s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *testableAzureBlobVolume) error {
 		return v.BlockWrite(ctx, TestHash, make([]byte, BlockSize))
 	})
 }
 
-func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, testFunc func(context.Context, *TestableAzureBlobVolume) error) {
+func (s *stubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, testFunc func(context.Context, *testableAzureBlobVolume) error) {
 	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
 		Cluster:      testCluster(c),
 		ConfigVolume: arvados.Volume{Replication: 3},
@@ -647,7 +648,7 @@ func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, tes
 	}()
 }
 
-func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
+func (s *stubbedAzureBlobSuite) TestStats(c *check.C) {
 	volume := s.newTestableAzureBlobVolume(c, newVolumeParams{
 		Cluster:      testCluster(c),
 		ConfigVolume: arvados.Volume{Replication: 3},
@@ -685,19 +686,19 @@ func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
 	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
 }
 
-func (v *TestableAzureBlobVolume) BlockWriteRaw(locator string, data []byte) {
+func (v *testableAzureBlobVolume) BlockWriteRaw(locator string, data []byte) {
 	v.azHandler.BlockWriteRaw(v.ContainerName, locator, data)
 }
 
-func (v *TestableAzureBlobVolume) TouchWithDate(locator string, lastBlockWrite time.Time) {
+func (v *testableAzureBlobVolume) TouchWithDate(locator string, lastBlockWrite time.Time) {
 	v.azHandler.TouchWithDate(v.ContainerName, locator, lastBlockWrite)
 }
 
-func (v *TestableAzureBlobVolume) Teardown() {
+func (v *testableAzureBlobVolume) Teardown() {
 	v.azStub.Close()
 }
 
-func (v *TestableAzureBlobVolume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableAzureBlobVolume) ReadWriteOperationLabelValues() (r, w string) {
 	return "get", "create"
 }
 
diff --git a/services/keepstore/count.go b/services/keepstore/count.go
index 700ca19dec..51434a803e 100644
--- a/services/keepstore/count.go
+++ b/services/keepstore/count.go
@@ -8,21 +8,21 @@ import (
 	"io"
 )
 
-func NewCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
+func newCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
 	return &countingReadWriter{
 		writer:  w,
 		counter: f,
 	}
 }
 
-func NewCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
+func newCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
 	return &countingReadWriter{
 		reader:  r,
 		counter: f,
 	}
 }
 
-func NewCountingReaderAtSeeker(r readerAtSeeker, f func(uint64)) *countingReaderAtSeeker {
+func newCountingReaderAtSeeker(r readerAtSeeker, f func(uint64)) *countingReaderAtSeeker {
 	return &countingReaderAtSeeker{readerAtSeeker: r, counter: f}
 }
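
The count.go change only unexports these constructors; the wrappers themselves just invoke a callback with the number of bytes moved on every Read or Write so volume I/O stats can be updated. A standalone sketch of the same idea (countingReader here is an illustration, not the package's type):

    package main

    import (
        "fmt"
        "io"
        "strings"
        "sync/atomic"
    )

    // countingReader calls tick with the byte count of every successful Read,
    // mirroring the role of newCountingReader above.
    type countingReader struct {
        r    io.Reader
        tick func(uint64)
    }

    func (cr countingReader) Read(p []byte) (int, error) {
        n, err := cr.r.Read(p)
        if n > 0 {
            cr.tick(uint64(n))
        }
        return n, err
    }

    func main() {
        var total atomic.Uint64
        cr := countingReader{
            r:    strings.NewReader("hello, keepstore"),
            tick: func(n uint64) { total.Add(n) },
        }
        io.Copy(io.Discard, cr)
        fmt.Println("bytes read:", total.Load()) // bytes read: 16
    }
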
 
diff --git a/services/keepstore/hashcheckwriter.go b/services/keepstore/hashcheckwriter.go
new file mode 100644
index 0000000000..f191c98e4b
--- /dev/null
+++ b/services/keepstore/hashcheckwriter.go
@@ -0,0 +1,68 @@
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package keepstore
+
+import (
+	"fmt"
+	"hash"
+	"io"
+)
+
+type hashCheckWriter struct {
+	writer       io.Writer
+	hash         hash.Hash
+	expectSize   int64
+	expectDigest string
+
+	offset int64
+}
+
+// newHashCheckWriter returns a writer that writes through to w, but
+// stops short if the written content reaches expectSize bytes and
+// does not match expectDigest according to the given hash
+// function.
+//
+// It returns a write error if more than expectSize bytes are written.
+//
+// Thus, in case of a hash mismatch, fewer than expectSize bytes
+// will be written through.
+func newHashCheckWriter(writer io.Writer, hash hash.Hash, expectSize int64, expectDigest string) io.Writer {
+	return &hashCheckWriter{
+		writer:       writer,
+		hash:         hash,
+		expectSize:   expectSize,
+		expectDigest: expectDigest,
+	}
+}
+
+func (hcw *hashCheckWriter) Write(p []byte) (int, error) {
+	if todo := hcw.expectSize - hcw.offset - int64(len(p)); todo < 0 {
+		// Writing beyond expected size returns a checksum
+		// error without even checking the hash.
+		return 0, errChecksum
+	} else if todo > 0 {
+		// This isn't the last write, so we pass it through.
+		_, err := hcw.hash.Write(p)
+		if err != nil {
+			return 0, err
+		}
+		n, err := hcw.writer.Write(p)
+		hcw.offset += int64(n)
+		return n, err
+	} else {
+		// This is the last write, so we check the hash before
+		// writing through.
+		_, err := hcw.hash.Write(p)
+		if err != nil {
+			return 0, err
+		}
+		if digest := fmt.Sprintf("%x", hcw.hash.Sum(nil)); digest != hcw.expectDigest {
+			return 0, errChecksum
+		}
+		// Ensure subsequent write will fail
+		hcw.offset = hcw.expectSize + 1
+		return hcw.writer.Write(p)
+	}
+}
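
In BlockRead, hashCheckWriter sits between the volume and the response writer: data streams through until the final write, where the accumulated md5 must match the locator's hash or the caller gets errChecksum and the last chunk is withheld. A hedged usage example as it might appear inside the keepstore package (fooHash is md5("foo"), as in the tests; imports: crypto/md5, io):

    // Illustration only: verify 3 bytes of stored data against fooHash
    // while streaming them to dst.
    func exampleHashCheck(stored []byte, dst io.Writer) error {
        const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8" // md5("foo")
        w := newHashCheckWriter(dst, md5.New(), 3, fooHash)
        _, err := w.Write(stored)
        // err is nil for []byte("foo"); errChecksum for wrong data of length
        // 3 or more; shorter data is written through and left for the caller
        // to detect as a short read.
        return err
    }
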
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 4ff690a42b..9555864cc6 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -14,6 +14,7 @@ import (
 	"net/http"
 	"os"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -39,7 +40,8 @@ var (
 	errVolumeUnavailable = httpserver.ErrorWithStatus(errors.New("volume unavailable"), http.StatusServiceUnavailable)
 	errCollision         = httpserver.ErrorWithStatus(errors.New("hash collision"), http.StatusInternalServerError)
 	errExpiredSignature  = httpserver.ErrorWithStatus(errors.New("expired signature"), http.StatusUnauthorized)
-	errInvalidSignature  = httpserver.ErrorWithStatus(errors.New("invalid signature"), http.StatusUnauthorized)
+	errInvalidSignature  = httpserver.ErrorWithStatus(errors.New("invalid signature"), http.StatusBadRequest)
+	errInvalidLocator    = httpserver.ErrorWithStatus(errors.New("invalid locator"), http.StatusBadRequest)
 	errFull              = httpserver.ErrorWithStatus(errors.New("insufficient storage"), http.StatusInsufficientStorage)
 	errTooLarge          = httpserver.ErrorWithStatus(errors.New("request entity too large"), http.StatusRequestEntityTooLarge)
 	driver               = make(map[string]volumeDriver)
@@ -54,6 +56,7 @@ type IndexOptions struct {
 type mount struct {
 	arvados.KeepMount
 	volume
+	priority int
 }
 
 type keepstore struct {
@@ -121,6 +124,7 @@ func (ks *keepstore) setupMounts(metrics *volumeMetricsVecs) error {
 			return fmt.Errorf("volume %s: invalid driver %q", uuid, cfgvol.Driver)
 		}
 		vol, err := dri(newVolumeParams{
+			UUID:         uuid,
 			Cluster:      ks.cluster,
 			ConfigVolume: cfgvol,
 			Logger:       ks.logger,
@@ -138,8 +142,16 @@ func (ks *keepstore) setupMounts(metrics *volumeMetricsVecs) error {
 		if repl < 1 {
 			repl = 1
 		}
+		pri := 0
+		for class, in := range cfgvol.StorageClasses {
+			p := ks.cluster.StorageClasses[class].Priority
+			if in && p > pri {
+				pri = p
+			}
+		}
 		mnt := &mount{
-			volume: vol,
+			volume:   vol,
+			priority: pri,
 			KeepMount: arvados.KeepMount{
 				UUID:           uuid,
 				DeviceID:       vol.DeviceID(),
@@ -185,7 +197,7 @@ func (ks *keepstore) checkLocatorSignature(ctx context.Context, locator string)
 }
 
 func (ks *keepstore) signLocator(token, locator string) string {
-	if token == "" {
+	if token == "" || !ks.cluster.Collections.BlobSigning || len(ks.cluster.Collections.BlobSigningKey) == 0 {
 		return locator
 	}
 	ttl := ks.cluster.Collections.BlobSigningTTL.Duration()
@@ -193,18 +205,27 @@ func (ks *keepstore) signLocator(token, locator string) string {
 }
 
 func (ks *keepstore) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (n int, err error) {
-	if strings.Contains(opts.Locator, "+R") && !strings.Contains(opts.Locator, "+A") {
+	li, err := parseLocator(opts.Locator)
+	if err != nil {
+		return 0, err
+	}
+	out := opts.WriteTo
+	if rw, ok := out.(http.ResponseWriter); ok {
+		out = &setSizeOnWrite{ResponseWriter: rw, size: li.size}
+	}
+	if li.remote && !li.signed {
 		return ks.blockReadRemote(ctx, opts)
 	}
 	if err := ks.checkLocatorSignature(ctx, opts.Locator); err != nil {
 		return 0, err
 	}
+	out = newHashCheckWriter(out, md5.New(), int64(li.size), li.hash)
 	var errToCaller error = os.ErrNotExist
-	for _, mnt := range ks.mountsR {
+	for _, mnt := range ks.rendezvous(li.hash, ks.mountsR) {
 		if ctx.Err() != nil {
 			return 0, ctx.Err()
 		}
-		n, err = mnt.BlockRead(ctx, strings.SplitN(opts.Locator, "+", 2)[0], opts.WriteTo)
+		n, err = mnt.BlockRead(ctx, li.hash, out)
 		if n > 0 || err == nil {
 			return n, err
 		}
@@ -365,8 +386,9 @@ func (ks *keepstore) BlockWrite(ctx context.Context, opts arvados.BlockWriteOpti
 	if opts.Hash != "" && !strings.HasPrefix(opts.Hash, hash) {
 		return resp, httpserver.ErrorWithStatus(fmt.Errorf("content hash %s did not match specified locator %s", hash, opts.Hash), http.StatusBadRequest)
 	}
+	rvzmounts := ks.rendezvous(hash, ks.mountsW)
 	result := newPutProgress(opts.StorageClasses)
-	for _, mnt := range ks.mountsW {
+	for _, mnt := range rvzmounts {
 		if !result.Want(mnt) {
 			continue
 		}
@@ -383,12 +405,14 @@ func (ks *keepstore) BlockWrite(ctx context.Context, opts arvados.BlockWriteOpti
 	}
 	var allFull atomic.Bool
 	allFull.Store(true)
+	// pending tracks what result will be if all outstanding
+	// writes succeed.
 	pending := result.Copy()
 	cond := sync.NewCond(new(sync.Mutex))
 	cond.L.Lock()
 	var wg sync.WaitGroup
 nextmnt:
-	for _, mnt := range ks.rendezvous(hash, ks.mountsW) {
+	for _, mnt := range rvzmounts {
 		for {
 			if result.Done() || ctx.Err() != nil {
 				break nextmnt
@@ -399,6 +423,10 @@ nextmnt:
 			if pending.Want(mnt) {
 				break
 			}
+			// This mount might not be needed, depending
+			// on the outcome of pending writes. Wait for
+			// a pending write to finish, then check
+			// again.
 			cond.Wait()
 		}
 		mnt := mnt
@@ -437,9 +465,14 @@ nextmnt:
 		}
 		return resp, nil
 	}
+	if allFull.Load() {
+		return resp, errFull
+	}
 	return resp, errVolumeUnavailable
 }
 
+// rendezvous sorts the given mounts by descending priority, then by
+// rendezvous order for the given locator.
 func (*keepstore) rendezvous(locator string, mnts []*mount) []*mount {
 	hash := locator
 	if len(hash) > 32 {
@@ -456,7 +489,12 @@ func (*keepstore) rendezvous(locator string, mnts []*mount) []*mount {
 		}
 		weight[mnt] = fmt.Sprintf("%x", md5.Sum([]byte(hash+uuidpart)))
 	}
-	sort.Slice(mnts, func(i, j int) bool { return weight[mnts[i]] < weight[mnts[j]] })
+	sort.Slice(mnts, func(i, j int) bool {
+		if p := mnts[i].priority - mnts[j].priority; p != 0 {
+			return p > 0
+		}
+		return weight[mnts[i]] < weight[mnts[j]]
+	})
 	return mnts
 }
 
@@ -561,3 +599,40 @@ func ctxToken(ctx context.Context) string {
 		return ""
 	}
 }
+
+type locatorInfo struct {
+	hash   string
+	size   int
+	remote bool
+	signed bool
+}
+
+func parseLocator(loc string) (locatorInfo, error) {
+	var li locatorInfo
+	for i, part := range strings.Split(loc, "+") {
+		switch i {
+		case 0:
+			if len(part) != 32 {
+				return li, errInvalidLocator
+			}
+			li.hash = part
+		case 1:
+			size, err := strconv.Atoi(part)
+			if err != nil {
+				return li, errInvalidLocator
+			}
+			li.size = size
+		default:
+			if len(part) == 0 {
+				return li, errInvalidLocator
+			}
+			if part[0] == 'A' {
+				li.signed = true
+			}
+			if part[0] == 'R' {
+				li.remote = true
+			}
+		}
+	}
+	return li, nil
+}
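
parseLocator accepts the usual Keep locator shape: a 32-character hash, an optional +size, then any number of +hints, where a hint starting with A marks a signed locator and one starting with R marks a remote one (BlockRead proxies remote, unsigned locators to blockReadRemote). A few illustrative calls, as they would behave from inside the package (the locator values are made up):

    // Illustration only; assumes the keepstore package context above.
    func exampleParseLocator() {
        li, _ := parseLocator("acbd18db4cc2f85cedef654fccc4a4d8+3")
        fmt.Println(li.hash, li.size) // acbd18db4cc2f85cedef654fccc4a4d8 3

        li, _ = parseLocator("acbd18db4cc2f85cedef654fccc4a4d8+3+A0f0f0f@65f0e3a1")
        fmt.Println(li.signed, li.remote) // true false

        li, _ = parseLocator("acbd18db4cc2f85cedef654fccc4a4d8+3+Rzzzzz-0f0f0f")
        fmt.Println(li.signed, li.remote) // false true

        _, err := parseLocator("tooshort")
        fmt.Println(err == errInvalidLocator) // true: first field must be 32 characters
    }
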
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
index f5482f65bf..686c413958 100644
--- a/services/keepstore/keepstore_test.go
+++ b/services/keepstore/keepstore_test.go
@@ -94,14 +94,15 @@ func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
 		Hash: fooHash,
 		Data: []byte("foo"),
 	})
-	c.Check(err, ErrorMatches, "checksum mismatch")
+	c.Check(err, ErrorMatches, "hash collision")
 
 	buf := bytes.NewBuffer(nil)
 	_, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
 		Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
 		WriteTo: buf,
 	})
-	c.Check(err, ErrorMatches, "checksum mismatch")
+	c.Check(err, ErrorMatches, "checksum mismatch in stored data")
+	c.Check(buf.String(), Not(Equals), "foo")
 	c.Check(buf.Len() < 3, Equals, true)
 
 	err = ks.mountsW[1].BlockWrite(ctx, fooHash, []byte("foo"))
@@ -112,7 +113,7 @@ func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
 		Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
 		WriteTo: buf,
 	})
-	c.Check(err, ErrorMatches, "checksum mismatch")
+	c.Check(err, ErrorMatches, "checksum mismatch in stored data")
 	c.Check(buf.Len() < 3, Equals, true)
 }
 
@@ -128,7 +129,8 @@ func (s *keepstoreSuite) TestBlockReadWrite_SigningDisabled(c *C) {
 	c.Assert(err, IsNil)
 	c.Check(resp.Locator, Equals, fooHash+"+3")
 	locUnsigned := resp.Locator
-	locSigned := ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	ttl := time.Hour
+	locSigned := arvados.SignLocator(locUnsigned, arvadostest.ActiveTokenV2, time.Now().Add(ttl), ttl, []byte(ks.cluster.Collections.BlobSigningKey))
 	c.Assert(locSigned, Not(Equals), locUnsigned)
 
 	for _, locator := range []string{locUnsigned, locSigned} {
@@ -149,27 +151,32 @@ func (s *keepstoreSuite) TestBlockReadWrite_SigningDisabled(c *C) {
 func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
-			Driver:         "mock",
+			Driver:         "stub",
 			Replication:    1,
 			StorageClasses: map[string]bool{"class1": true}},
 		"zzzzz-nyw5e-222222222222222": {
-			Driver:         "mock",
+			Driver:         "stub",
 			Replication:    1,
 			StorageClasses: map[string]bool{"class2": true, "class3": true}},
 	}
 
+	// "foobar" is just some data that happens to result in
+	// rendezvous order {111, 222}
+	data := []byte("foobar")
+	hash := fmt.Sprintf("%x", md5.Sum(data))
+
 	for _, trial := range []struct {
 		priority1 int // priority of class1, thus vol1
 		priority2 int // priority of class2
 		priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
 		expectLog string
 	}{
-		{100, 50, 50, "111 read acb\n"},              // class1 has higher priority => try vol1 first, no need to try vol2
-		{100, 100, 100, "111 read acb\n"},            // same priority, vol1 is first in rendezvous order => try vol1 first and succeed
-		{66, 99, 33, "222 read acb\n111 read acb\n"}, // class2 has higher priority => try vol2 first, then try vol1
-		{66, 33, 99, "222 read acb\n111 read acb\n"}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
+		{100, 50, 50, "111 read 385\n"},              // class1 has higher priority => try vol1 first, no need to try vol2
+		{100, 100, 100, "111 read 385\n"},            // same priority, vol2 is first in rendezvous order => try vol1 first and succeed
+		{66, 99, 33, "222 read 385\n111 read 385\n"}, // class2 has higher priority => try vol2 first, then try vol1
+		{66, 33, 99, "222 read 385\n111 read 385\n"}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
 	} {
-		c.Logf("%+v", trial)
+		c.Logf("=== %+v", trial)
 
 		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
 			"class1": {Priority: trial.priority1},
@@ -178,30 +185,36 @@ func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 		}
 		ks, cancel := testKeepstore(c, s.cluster, nil)
 		defer cancel()
-		stubLog := &stubLog{}
-		for _, mnt := range ks.mounts {
-			mnt.volume.(*stubVolume).stubLog = stubLog
-		}
+
 		ctx := authContext(arvadostest.ActiveTokenV2)
 		resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
-			Hash:           fooHash,
-			Data:           []byte("foo"),
+			Hash:           hash,
+			Data:           data,
 			StorageClasses: []string{"class1"},
 		})
 		c.Assert(err, IsNil)
+
+		// Combine logs into one. (We only want the logs from
+		// the BlockRead below, not from BlockWrite above.)
+		stubLog := &stubLog{}
+		for _, mnt := range ks.mounts {
+			mnt.volume.(*stubVolume).stubLog = stubLog
+		}
+
 		n, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
 			Locator: resp.Locator,
 			WriteTo: io.Discard,
 		})
-		c.Assert(n, Equals, 3)
+		c.Assert(n, Equals, len(data))
 		c.Assert(err, IsNil)
 		c.Check(stubLog.String(), Equals, trial.expectLog)
 	}
 }
 
 func (s *keepstoreSuite) TestBlockWrite_NoWritableVolumes(c *C) {
-	for _, v := range s.cluster.Volumes {
+	for uuid, v := range s.cluster.Volumes {
 		v.ReadOnly = true
+		s.cluster.Volumes[uuid] = v
 	}
 	ks, cancel := testKeepstore(c, s.cluster, nil)
 	defer cancel()
@@ -235,45 +248,74 @@ func (s *keepstoreSuite) TestBlockWrite_MultipleStorageClasses(c *C) {
 			Replication:    1,
 			StorageClasses: map[string]bool{"class2": true}},
 	}
+
+	// testData is a block that happens to have rendezvous order 111, 121, 222
+	testData := []byte("qux")
+	testHash := fmt.Sprintf("%x+%d", md5.Sum(testData), len(testData))
+
 	s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
 		"class1": {},
 		"class2": {},
 		"class3": {},
 	}
-	ks, cancel := testKeepstore(c, s.cluster, nil)
-	defer cancel()
-	stubLog := &stubLog{}
-	for _, mnt := range ks.mounts {
-		mnt.volume.(*stubVolume).stubLog = stubLog
-	}
-
-	rvz := ks.rendezvous(fooHash, ks.mountsW)
-	c.Assert(rvz[0].UUID[24:], Equals, "111")
-	c.Assert(rvz[1].UUID[24:], Equals, "121")
-	c.Assert(rvz[2].UUID[24:], Equals, "222")
 
 	ctx := authContext(arvadostest.ActiveTokenV2)
-	for _, trial := range []struct {
+	for idx, trial := range []struct {
 		classes   string // desired classes
 		expectLog string
 	}{
 		{"class1", "" +
-			"111 read acb\n" +
-			"121 read acb\n" +
-			"111 write acb\n" +
-			"111 read acb\n" +
-			"111 read acb\n"},
-		{"class2", ""},
-		{"class1,class2", ""},
-		{"class1,class2", ""},
-		{"class1,class2,class404", ""},
+			"111 read d85\n" +
+			"121 read d85\n" +
+			"111 write d85\n" +
+			"111 read d85\n" +
+			"111 touch d85\n"},
+		{"class2", "" +
+			"121 read d85\n" + // write#1
+			"222 read d85\n" +
+			"121 write d85\n" +
+			"121 read d85\n" + // write#2
+			"121 touch d85\n"},
+		{"class1,class2", "" +
+			"111 read d85\n" + // write#1
+			"121 read d85\n" +
+			"222 read d85\n" +
+			"121 write d85\n" +
+			"111 write d85\n" +
+			"111 read d85\n" + // write#2
+			"111 touch d85\n" +
+			"121 read d85\n" +
+			"121 touch d85\n"},
+		{"class1,class2,class404", "" +
+			"111 read d85\n" + // write#1
+			"121 read d85\n" +
+			"222 read d85\n" +
+			"121 write d85\n" +
+			"111 write d85\n" +
+			"111 read d85\n" + // write#2
+			"111 touch d85\n" +
+			"121 read d85\n" +
+			"121 touch d85\n"},
 	} {
-		c.Logf("%+v", trial)
-		stubLog.Reset()
+		c.Logf("=== %d: %+v", idx, trial)
+
+		ks, cancel := testKeepstore(c, s.cluster, nil)
+		defer cancel()
+		stubLog := &stubLog{}
+		for _, mnt := range ks.mounts {
+			mnt.volume.(*stubVolume).stubLog = stubLog
+		}
+
+		// Check that we chose the right block data
+		rvz := ks.rendezvous(testHash, ks.mountsW)
+		c.Assert(rvz[0].UUID[24:], Equals, "111")
+		c.Assert(rvz[1].UUID[24:], Equals, "121")
+		c.Assert(rvz[2].UUID[24:], Equals, "222")
+
 		for i := 0; i < 2; i++ {
 			_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
-				Hash:           fooHash,
-				Data:           []byte("foo"),
+				Hash:           testHash,
+				Data:           testData,
 				StorageClasses: strings.Split(trial.classes, ","),
 			})
 			c.Check(err, IsNil)
@@ -314,9 +356,9 @@ func (s *keepstoreSuite) TestBufferPoolLeak(c *C) {
 func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
 	c.Fatal("todo: volume with no specified classes implies 'default'")
 	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
-		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
-		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"}, // "default" is implicit
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"special": true, "extra": true}},
+		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
 	}
 	ks, cancel := testKeepstore(c, s.cluster, nil)
 	defer cancel()
@@ -473,9 +515,9 @@ type stubVolume struct {
 }
 
 func (v *stubVolume) log(op, hash string) {
-	// Note this intentionally crashes if len(hash)<32 -- if
-	// keepstore ever does that, tests should fail.
-	v.stubLog.Printf("%s %s %s", v.params.UUID, op, hash[29:32])
+	// Note this intentionally crashes if UUID or hash is short --
+	// if keepstore ever does that, tests should fail.
+	v.stubLog.Printf("%s %s %s", v.params.UUID[24:27], op, hash[:3])
 }
 
 func (v *stubVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
@@ -489,10 +531,22 @@ func (v *stubVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writ
 	v.mtx.Lock()
 	ent, ok := v.data[hash]
 	v.mtx.Unlock()
-	if !ok {
+	if !ok || !ent.trash.IsZero() {
 		return 0, os.ErrNotExist
 	}
-	return writeTo.Write(ent.data)
+	wrote := 0
+	for writesize := 1000; wrote < len(ent.data); writesize = writesize * 2 {
+		data := ent.data[wrote:]
+		if len(data) > writesize {
+			data = data[:writesize]
+		}
+		n, err := writeTo.Write(data)
+		wrote += n
+		if err != nil {
+			return wrote, err
+		}
+	}
+	return wrote, nil
 }
 
 func (v *stubVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
@@ -525,7 +579,7 @@ func (v *stubVolume) BlockTouch(hash string) error {
 	v.mtx.Lock()
 	defer v.mtx.Unlock()
 	ent, ok := v.data[hash]
-	if !ok {
+	if !ok || !ent.trash.IsZero() {
 		return os.ErrNotExist
 	}
 	ent.mtime = time.Now()
@@ -533,6 +587,20 @@ func (v *stubVolume) BlockTouch(hash string) error {
 	return nil
 }
 
+// Set mtime to the (presumably old) specified time.
+func (v *stubVolume) blockTouchWithTime(hash string, t time.Time) error {
+	v.log("touchwithtime", hash)
+	v.mtx.Lock()
+	defer v.mtx.Unlock()
+	ent, ok := v.data[hash]
+	if !ok {
+		return os.ErrNotExist
+	}
+	ent.mtime = t
+	v.data[hash] = ent
+	return nil
+}
+
 func (v *stubVolume) BlockTrash(hash string) error {
 	v.log("trash", hash)
 	if v.blockTrash != nil {
@@ -579,7 +647,7 @@ func (v *stubVolume) Index(ctx context.Context, prefix string, writeTo io.Writer
 	buf := &bytes.Buffer{}
 	v.mtx.Lock()
 	for hash, ent := range v.data {
-		if strings.HasPrefix(hash, prefix) {
+		if ent.trash.IsZero() && strings.HasPrefix(hash, prefix) {
 			fmt.Fprintf(buf, "%s+%d %d\n", hash, len(ent.data), ent.mtime.UnixNano())
 		}
 	}
@@ -596,9 +664,9 @@ func (v *stubVolume) Mtime(hash string) (time.Time, error) {
 		}
 	}
 	v.mtx.Lock()
-	defer v.mtx.Lock()
+	defer v.mtx.Unlock()
 	ent, ok := v.data[hash]
-	if !ok {
+	if !ok || !ent.trash.IsZero() {
 		return time.Time{}, os.ErrNotExist
 	}
 	return ent.mtime, nil
diff --git a/services/keepstore/mounts_test.go b/services/keepstore/mounts_test.go
index 8b0ad6a9be..71b83b6111 100644
--- a/services/keepstore/mounts_test.go
+++ b/services/keepstore/mounts_test.go
@@ -42,40 +42,48 @@ func (s *routerSuite) TestMounts(c *C) {
 	for _, m := range mntList {
 		c.Check(len(m.UUID), Equals, 27)
 		c.Check(m.UUID[:12], Equals, "zzzzz-nyw5e-")
-		c.Check(m.DeviceID, Equals, "mock-device-id")
+		c.Check(m.DeviceID, Matches, "0x[0-9a-f]+")
 		c.Check(m.ReadOnly, Equals, false)
 		c.Check(m.Replication, Equals, 1)
-		c.Check(m.StorageClasses, DeepEquals, map[string]bool{"default": true})
+		c.Check(m.StorageClasses, HasLen, 1)
+		for k := range m.StorageClasses {
+			c.Check(k, Matches, "testclass.*")
+		}
 	}
 	c.Check(mntList[0].UUID, Not(Equals), mntList[1].UUID)
 
-	// Bad auth
+	c.Logf("=== bad auth")
 	for _, tok := range []string{"", "xyzzy"} {
 		resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil, nil)
-		c.Check(resp.Code, Equals, http.StatusUnauthorized)
-		c.Check(resp.Body.String(), Equals, "Unauthorized\n")
+		if tok == "" {
+			c.Check(resp.Code, Equals, http.StatusUnauthorized)
+			c.Check(resp.Body.String(), Equals, "Unauthorized\n")
+		} else {
+			c.Check(resp.Code, Equals, http.StatusForbidden)
+			c.Check(resp.Body.String(), Equals, "Forbidden\n")
+		}
 	}
 
-	// Nonexistent mount UUID
+	c.Logf("=== nonexistent mount UUID")
 	resp = call(router, "GET", "/mounts/X/blocks", s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusNotFound)
 
-	// Complete index of first mount
+	c.Logf("=== complete index of first mount")
 	resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks", s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
 
-	// Partial index of first mount (one block matches prefix)
+	c.Logf("=== partial index of first mount (one block matches prefix)")
 	resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
 
-	// Complete index of second mount (note trailing slash)
+	c.Logf("=== complete index of second mount (note trailing slash)")
 	resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/", s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Body.String(), Matches, barHash+`\+[0-9]+ [0-9]+\n\n`)
 
-	// Partial index of second mount (no blocks match prefix)
+	c.Logf("=== partial index of second mount (no blocks match prefix)")
 	resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Body.String(), Equals, "\n")
diff --git a/services/keepstore/proxy_remote_test.go b/services/keepstore/proxy_remote_test.go
index d882b3a8f9..2d1335afbf 100644
--- a/services/keepstore/proxy_remote_test.go
+++ b/services/keepstore/proxy_remote_test.go
@@ -94,7 +94,7 @@ func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
 			Insecure: true,
 		},
 	}
-	s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "mock"}}
+	s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "stub"}}
 }
 
 func (s *ProxyRemoteSuite) TearDownTest(c *check.C) {
diff --git a/services/keepstore/pull_worker.go b/services/keepstore/pull_worker.go
index 60cd97edf7..760ba90b46 100644
--- a/services/keepstore/pull_worker.go
+++ b/services/keepstore/pull_worker.go
@@ -15,7 +15,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-type pullListItem struct {
+type PullListItem struct {
 	Locator   string   `json:"locator"`
 	Servers   []string `json:"servers"`
 	MountUUID string   `json:"mount_uuid"` // Destination mount, or "" for "anywhere"
@@ -23,7 +23,7 @@ type pullListItem struct {
 
 type puller struct {
 	keepstore *keepstore
-	todo      []pullListItem
+	todo      []PullListItem
 	cond      *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
 }
 
@@ -55,7 +55,7 @@ func newPuller(ctx context.Context, keepstore *keepstore, reg *prometheus.Regist
 	return p
 }
 
-func (p *puller) SetPullList(newlist []pullListItem) {
+func (p *puller) SetPullList(newlist []PullListItem) {
 	p.cond.L.Lock()
 	p.todo = newlist
 	p.cond.L.Unlock()
diff --git a/services/keepstore/router.go b/services/keepstore/router.go
index f741610ca9..d0043c978b 100644
--- a/services/keepstore/router.go
+++ b/services/keepstore/router.go
@@ -29,7 +29,11 @@ type router struct {
 }
 
 func newRouter(keepstore *keepstore, puller *puller, trasher *trasher) service.Handler {
-	rtr := &router{keepstore: keepstore}
+	rtr := &router{
+		keepstore: keepstore,
+		puller:    puller,
+		trasher:   trasher,
+	}
 	adminonly := func(h http.HandlerFunc) http.HandlerFunc {
 		return auth.RequireLiteralToken(keepstore.cluster.SystemRootToken, h).ServeHTTP
 	}
@@ -38,7 +42,7 @@ func newRouter(keepstore *keepstore, puller *puller, trasher *trasher) service.H
 	}
 
 	r := mux.NewRouter()
-	locatorPath := `/{locator:[0-9a-f]{32}(\+[0-9]+(\+.+)?)?}`
+	locatorPath := `/{locator:[0-9a-f]{32}(?:\+[0-9]+(?:\+.+)?)?}`
 	get := r.Methods(http.MethodGet, http.MethodHead).Subrouter()
 	get.HandleFunc(locatorPath, rtr.handleBlockRead)
 	get.HandleFunc(`/index`, adminonly(rtr.handleIndex))
@@ -58,6 +62,7 @@ func newRouter(keepstore *keepstore, puller *puller, trasher *trasher) service.H
 	delete := r.Methods(http.MethodDelete).Subrouter()
 	delete.HandleFunc(locatorPath, adminonly(rtr.handleBlockTrash))
 	r.NotFoundHandler = http.HandlerFunc(rtr.handleBadRequest)
+	r.MethodNotAllowedHandler = http.HandlerFunc(rtr.handleBadRequest)
 	rtr.Handler = auth.LoadToken(r)
 	return rtr
 }
@@ -100,7 +105,7 @@ func (rtr *router) handleBlockWrite(w http.ResponseWriter, req *http.Request) {
 		Reader:         req.Body,
 		DataSize:       dataSize,
 		RequestID:      req.Header.Get("X-Request-Id"),
-		StorageClasses: strings.Split(",", req.Header.Get("X-Keep-Storage-Classes")),
+		StorageClasses: trimSplit(req.Header.Get("X-Keep-Storage-Classes"), ","),
 		Replicas:       replicas,
 	})
 	if err != nil {
@@ -137,10 +142,14 @@ func (rtr *router) handleMounts(w http.ResponseWriter, req *http.Request) {
 }
 
 func (rtr *router) handleIndex(w http.ResponseWriter, req *http.Request) {
+	prefix := req.FormValue("prefix")
+	if prefix == "" {
+		prefix = mux.Vars(req)["prefix"]
+	}
 	cw := &countingWriter{writer: w}
 	err := rtr.keepstore.Index(req.Context(), IndexOptions{
 		MountUUID: mux.Vars(req)["uuid"],
-		Prefix:    mux.Vars(req)["prefix"],
+		Prefix:    prefix,
 		WriteTo:   cw,
 	})
 	if err != nil && cw.n.Load() == 0 {
@@ -167,7 +176,7 @@ func (rtr *router) handleStatus(w http.ResponseWriter, req *http.Request) {
 }
 
 func (rtr *router) handlePullList(w http.ResponseWriter, req *http.Request) {
-	var pl []pullListItem
+	var pl []PullListItem
 	err := json.NewDecoder(req.Body).Decode(&pl)
 	if err != nil {
 		rtr.handleError(w, req, err)
@@ -178,7 +187,7 @@ func (rtr *router) handlePullList(w http.ResponseWriter, req *http.Request) {
 }
 
 func (rtr *router) handleTrashList(w http.ResponseWriter, req *http.Request) {
-	var tl []trashListItem
+	var tl []TrashListItem
 	err := json.NewDecoder(req.Body).Decode(&tl)
 	if err != nil {
 		rtr.handleError(w, req, err)
@@ -224,3 +233,32 @@ func (cw *countingWriter) Write(p []byte) (int, error) {
 	cw.n.Add(int64(n))
 	return n, err
 }
+
+// Split s by sep, trim whitespace from each part, and drop empty
+// parts.
+func trimSplit(s, sep string) []string {
+	var r []string
+	for _, part := range strings.Split(s, sep) {
+		part = strings.TrimSpace(part)
+		if part != "" {
+			r = append(r, part)
+		}
+	}
+	return r
+}
+
+// setSizeOnWrite sets the Content-Length header to the given size on
+// first write.
+type setSizeOnWrite struct {
+	http.ResponseWriter
+	size  int
+	wrote bool
+}
+
+func (ss *setSizeOnWrite) Write(p []byte) (int, error) {
+	if !ss.wrote {
+		ss.Header().Set("Content-Length", fmt.Sprintf("%d", ss.size))
+		ss.wrote = true
+	}
+	return ss.ResponseWriter.Write(p)
+}
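
trimSplit also fixes the swapped-argument bug in the old handleBlockWrite call (strings.Split(",", header) split the literal comma rather than the header value), and it tolerates padded values like " , testclass2 , " that the router tests send. A standalone copy of the helper with a usage example:

    package main

    import (
        "fmt"
        "strings"
    )

    // trimSplit: split, trim whitespace, drop empty parts (same behavior as
    // the helper above).
    func trimSplit(s, sep string) []string {
        var r []string
        for _, part := range strings.Split(s, sep) {
            part = strings.TrimSpace(part)
            if part != "" {
                r = append(r, part)
            }
        }
        return r
    }

    func main() {
        fmt.Printf("%q\n", trimSplit(" , testclass2 , ", ","))      // ["testclass2"]
        fmt.Printf("%q\n", trimSplit("testclass1,testclass2", ",")) // ["testclass1" "testclass2"]
    }

setSizeOnWrite complements the checksum change: the Content-Length header is committed on the first write, so even though the status code has already been sent, a generic HTTP client can still detect a truncated (checksum-mismatch) response by comparing the body length against Content-Length, as router_test.go's TestBlockRead_ChecksumMismatch checks.
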
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
index aeb233e945..1cad9af251 100644
--- a/services/keepstore/router_test.go
+++ b/services/keepstore/router_test.go
@@ -7,13 +7,19 @@ package keepstore
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
+	"errors"
+	"fmt"
+	"io"
 	"net/http"
 	"net/http/httptest"
 	"sort"
 	"strings"
+	"time"
 
 	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"git.arvados.org/arvados.git/sdk/go/httpserver"
 	"github.com/prometheus/client_golang/prometheus"
 	. "gopkg.in/check.v1"
 )
@@ -42,8 +48,8 @@ type routerSuite struct {
 func (s *routerSuite) SetUpTest(c *C) {
 	s.cluster = testCluster(c)
 	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"testclass1": true}},
-		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"testclass2": true}},
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"testclass1": true}},
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub", StorageClasses: map[string]bool{"testclass2": true}},
 	}
 	s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
 		"testclass1": arvados.StorageClassConfig{
@@ -67,17 +73,50 @@ func (s *routerSuite) TestBlockRead_Token(c *C) {
 	// No token provided
 	resp := call(router, "GET", "http://example/"+locSigned, "", nil, nil)
 	c.Check(resp.Code, Equals, http.StatusUnauthorized)
-	c.Check(string(resp.Body.Bytes()), Matches, "no token provided")
+	c.Check(resp.Body.String(), Matches, "no token provided in Authorization header\n")
 
 	// Different token => invalid signature
 	resp = call(router, "GET", "http://example/"+locSigned, "badtoken", nil, nil)
 	c.Check(resp.Code, Equals, http.StatusBadRequest)
-	c.Check(string(resp.Body.Bytes()), Matches, "invalid signature")
+	c.Check(resp.Body.String(), Equals, "invalid signature\n")
 
 	// Correct token
 	resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
-	c.Check(string(resp.Body.Bytes()), Equals, "foo")
+	c.Check(resp.Body.String(), Equals, "foo")
+}
+
+// By the time we discover the checksum mismatch, it's too late to
+// change the response code, but the expected block size is given in
+// the Content-Length response header, so a generic http client can
+// detect the problem.
+func (s *routerSuite) TestBlockRead_ChecksumMismatch(c *C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	gooddata := make([]byte, 10_000_000)
+	gooddata[0] = 'a'
+	hash := fmt.Sprintf("%x", md5.Sum(gooddata))
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fmt.Sprintf("%s+%d", hash, len(gooddata)))
+
+	for _, baddata := range [][]byte{
+		make([]byte, 3),
+		make([]byte, len(gooddata)),
+		make([]byte, len(gooddata)-1),
+		make([]byte, len(gooddata)+1),
+		make([]byte, len(gooddata)*2),
+	} {
+		c.Logf("=== baddata len %d", len(baddata))
+		err := router.keepstore.mountsW[0].BlockWrite(context.Background(), hash, baddata)
+		c.Assert(err, IsNil)
+
+		resp := call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+		if !c.Check(resp.Code, Equals, http.StatusOK) {
+			c.Logf("resp.Body: %s", resp.Body.String())
+		}
+		c.Check(resp.Body.Len(), Not(Equals), len(gooddata))
+		c.Check(resp.Result().Header.Get("Content-Length"), Equals, fmt.Sprintf("%d", len(gooddata)))
+	}
 }
 
 func (s *routerSuite) TestBlockWrite_Headers(c *C) {
@@ -86,17 +125,17 @@ func (s *routerSuite) TestBlockWrite_Headers(c *C) {
 
 	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
 
-	resp := call(router, "GET", "http://example/"+fooHash, arvadostest.ActiveTokenV2, nil, http.Header{"X-Arvados-Replicas-Desired": []string{"2"}})
+	resp := call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Arvados-Replicas-Desired": []string{"2"}})
 	c.Check(resp.Code, Equals, http.StatusOK)
-	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "2")
-	c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), Equals, "testclass1=1, testclass2=1")
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), Equals, "testclass1=1")
 
-	resp = call(router, "GET", "http://example/"+fooHash, arvadostest.ActiveTokenV2, nil, http.Header{"X-Keep-Storage-Classes": []string{"testclass1"}})
+	resp = call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Keep-Storage-Classes": []string{"testclass1"}})
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
 	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1")
 
-	resp = call(router, "GET", "http://example/"+fooHash, arvadostest.ActiveTokenV2, nil, http.Header{"X-Keep-Storage-Classes": []string{" , testclass2 , "}})
+	resp = call(router, "PUT", "http://example/"+fooHash, arvadostest.ActiveTokenV2, []byte("foo"), http.Header{"X-Keep-Storage-Classes": []string{" , testclass2 , "}})
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
 	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass2=1")
@@ -140,24 +179,29 @@ func (s *routerSuite) TestRequireAdminMgtToken(c *C) {
 	defer cancel()
 
 	for _, token := range []string{"badtoken", ""} {
-		for _, path := range []string{
-			"/pull",
-			"/trash",
-			"/index",
-			"/index/",
-			"/index/1234",
-			"/untrash/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-			"/debug.json",
-			"/status.json",
+		for _, trial := range []string{
+			"PUT /pull",
+			"PUT /trash",
+			"GET /index",
+			"GET /index/",
+			"GET /index/1234",
+			"PUT /untrash/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+			"GET /debug.json",
+			"GET /status.json",
 		} {
-			c.Logf("=== %s", path)
-			req := httptest.NewRequest("GET", "http://example"+path, nil)
+			c.Logf("=== %s", trial)
+			methodpath := strings.Split(trial, " ")
+			req := httptest.NewRequest(methodpath[0], "http://example"+methodpath[1], nil)
 			if token != "" {
 				req.Header.Set("Authorization", "Bearer "+token)
 			}
 			resp := httptest.NewRecorder()
 			router.ServeHTTP(resp, req)
-			c.Check(resp.Code, Equals, http.StatusUnauthorized)
+			if token == "" {
+				c.Check(resp.Code, Equals, http.StatusUnauthorized)
+			} else {
+				c.Check(resp.Code, Equals, http.StatusForbidden)
+			}
 		}
 	}
 	req := httptest.NewRequest("TOUCH", "http://example/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", nil)
@@ -167,11 +211,42 @@ func (s *routerSuite) TestRequireAdminMgtToken(c *C) {
 }
 
 func (s *routerSuite) TestVolumeErrorStatusCode(c *C) {
-	c.Fatal("todo: volume operation fails 'busy', router returns 5xx")
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+	router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(_ context.Context, hash string, w io.Writer) (int, error) {
+		return 0, httpserver.ErrorWithStatus(errors.New("test error"), http.StatusBadGateway)
+	}
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	resp := call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusBadGateway)
 }
 
+// Check that the context passed to a volume method gets cancelled
+// when the http client hangs up.
 func (s *routerSuite) TestCancelOnDisconnect(c *C) {
-	c.Fatal("todo: volume operation context is cancelled when client disconnects")
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
+	unblock := make(chan struct{})
+	router.keepstore.mountsW[0].volume.(*stubVolume).blockRead = func(ctx context.Context, hash string, w io.Writer) (int, error) {
+		<-unblock
+		c.Check(ctx.Err(), NotNil)
+		return 0, ctx.Err()
+	}
+	go func() {
+		time.Sleep(time.Second / 10)
+		cancel()
+		close(unblock)
+	}()
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	req, err := http.NewRequestWithContext(ctx, "GET", "http://example/"+locSigned, nil)
+	c.Assert(err, IsNil)
+	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+	resp := httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, 499)
 }
 
 func call(handler http.Handler, method, path, tok string, body []byte, hdr http.Header) *httptest.ResponseRecorder {
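
TestCancelOnDisconnect above depends on context propagation: when the HTTP client hangs up, req.Context() is cancelled, the cancellation reaches the volume's BlockRead, and the response is recorded with the nginx-style 499 status (supplied, as far as this diff shows, by the httpserver wrapper rather than by the router itself). A minimal standard-library sketch of the same behavior, with a fixed delay standing in for a slow volume read:

    package main

    import (
        "net/http"
        "time"
    )

    func main() {
        http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
            select {
            case <-time.After(10 * time.Second): // stand-in for a slow volume read
                w.Write([]byte("block data"))
            case <-req.Context().Done():
                // Client disconnected: give up instead of finishing the
                // read. keepstore's volume methods receive this same
                // context and are expected to return ctx.Err(), as the
                // stub blockRead in the test above checks.
                return
            }
        })
        http.ListenAndServe(":8080", nil)
    }
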
diff --git a/services/keepstore/s3aws_volume.go b/services/keepstore/s3aws_volume.go
index 65dd087d50..2417bb8149 100644
--- a/services/keepstore/s3aws_volume.go
+++ b/services/keepstore/s3aws_volume.go
@@ -529,12 +529,8 @@ func (v *S3AWSVolume) writeObject(ctx context.Context, key string, r io.Reader)
 func (v *S3AWSVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
 	// Do not use putWithPipe here; we want to pass an io.ReadSeeker to the S3
 	// sdk to avoid memory allocation there. See #17339 for more information.
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
-
 	rdr := bytes.NewReader(data)
-	r := NewCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
+	r := newCountingReaderAtSeeker(rdr, v.bucket.stats.TickOutBytes)
 	key := v.key(hash)
 	err := v.writeObject(ctx, key, r)
 	if err != nil {
@@ -734,9 +730,6 @@ func (v *S3AWSVolume) InternalStats() interface{} {
 
 // BlockTouch sets the timestamp for the given locator to the current time.
 func (v *S3AWSVolume) BlockTouch(hash string) error {
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
 	key := v.key(hash)
 	_, err := v.head(key)
 	err = v.translateError(err)
@@ -793,9 +786,6 @@ func (b *s3AWSbucket) Del(path string) error {
 
 // Trash a Keep block.
 func (v *S3AWSVolume) BlockTrash(loc string) error {
-	if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
-		return errMethodNotAllowed
-	}
 	if t, err := v.Mtime(loc); err != nil {
 		return err
 	} else if time.Since(t) < v.cluster.Collections.BlobSigningTTL.Duration() {
diff --git a/services/keepstore/s3aws_volume_test.go b/services/keepstore/s3aws_volume_test.go
index 9b6b737e52..f05cbee848 100644
--- a/services/keepstore/s3aws_volume_test.go
+++ b/services/keepstore/s3aws_volume_test.go
@@ -58,7 +58,6 @@ type StubbedS3AWSSuite struct {
 	s3server *httptest.Server
 	metadata *httptest.Server
 	cluster  *arvados.Cluster
-	handler  *router
 	volumes  []*TestableS3AWSVolume
 }
 
@@ -70,7 +69,6 @@ func (s *StubbedS3AWSSuite) SetUpTest(c *check.C) {
 		"zzzzz-nyw5e-000000000000000": {Driver: "S3"},
 		"zzzzz-nyw5e-111111111111111": {Driver: "S3"},
 	}
-	s.handler = newHandlerOrErrorHandler(context.Background(), s.cluster, s.cluster.SystemRootToken, prometheus.NewRegistry()).(*router)
 }
 
 func (s *StubbedS3AWSSuite) TestGeneric(c *check.C) {
@@ -222,7 +220,7 @@ func (s *StubbedS3AWSSuite) TestStats(c *check.C) {
 	c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
 
 	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-	err := v.BlockWrite(context.Background(), loc, make([]byte, 3))
+	_, err := v.BlockRead(context.Background(), loc, io.Discard)
 	c.Check(err, check.NotNil)
 	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
 	c.Check(stats(), check.Matches, `.*"s3.requestFailure 404 NoSuchKey[^"]*":[^0].*`)
@@ -326,6 +324,7 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 	v := s.newTestableVolume(c, newVolumeParams{
 		Cluster:      s.cluster,
 		ConfigVolume: arvados.Volume{Replication: 2},
+		Logger:       ctxlog.TestLogger(c),
 		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
 		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
 	}, 5*time.Minute)
@@ -624,7 +623,7 @@ func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, params newVolumeParams
 // PutRaw skips the ContentMD5 test
 func (v *TestableS3AWSVolume) PutRaw(loc string, block []byte) {
 	key := v.key(loc)
-	r := NewCountingReader(bytes.NewReader(block), v.bucket.stats.TickOutBytes)
+	r := newCountingReader(bytes.NewReader(block), v.bucket.stats.TickOutBytes)
 
 	uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
 		u.PartSize = 5 * 1024 * 1024
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index 1d180fc60d..8da0a6ab52 100644
--- a/services/keepstore/trash_worker.go
+++ b/services/keepstore/trash_worker.go
@@ -13,7 +13,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-type trashListItem struct {
+type TrashListItem struct {
 	Locator    string `json:"locator"`
 	BlockMtime int64  `json:"block_mtime"`
 	MountUUID  string `json:"mount_uuid"` // Target mount, or "" for "everywhere"
@@ -21,8 +21,12 @@ type trashListItem struct {
 
 type trasher struct {
 	keepstore *keepstore
-	todo      []trashListItem
+	todo      []TrashListItem
 	cond      *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
+
+	// For the benefit of test cases: if this channel is non-nil,
+	// send len(todo) to it after processing each trash list item.
+	notifyTodoLen chan int
 }
 
 func newTrasher(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *trasher {
@@ -47,20 +51,7 @@ func newTrasher(ctx context.Context, keepstore *keepstore, reg *prometheus.Regis
 		keepstore.logger.Info("not running trash worker because Collections.BlobTrash == false")
 		return t
 	}
-	for i := 0; i < keepstore.cluster.Collections.BlobTrashConcurrency; i++ {
-		go t.runWorker(ctx)
-	}
-	return t
-}
 
-func (t *trasher) SetTrashList(newlist []trashListItem) {
-	t.cond.L.Lock()
-	t.todo = newlist
-	t.cond.L.Unlock()
-	t.cond.Broadcast()
-}
-
-func (t *trasher) runWorker(ctx context.Context) {
 	var mntsAllowTrash []*mount
 	for _, mnt := range t.keepstore.mounts {
 		if mnt.AllowTrash {
@@ -69,18 +60,41 @@ func (t *trasher) runWorker(ctx context.Context) {
 	}
 	if len(mntsAllowTrash) == 0 {
 		t.keepstore.logger.Info("not running trash worker because there are no writable or trashable volumes")
-		return
+	} else {
+		for i := 0; i < keepstore.cluster.Collections.BlobTrashConcurrency; i++ {
+			go t.runWorker(ctx, mntsAllowTrash)
+		}
 	}
+	return t
+}
+
+func (t *trasher) SetTrashList(newlist []TrashListItem) {
+	t.cond.L.Lock()
+	t.todo = newlist
+	t.cond.L.Unlock()
+	t.cond.Broadcast()
+}
+
+func (t *trasher) runWorker(ctx context.Context, mntsAllowTrash []*mount) {
 	go func() {
 		<-ctx.Done()
 		t.cond.Broadcast()
 	}()
 	for {
 		t.cond.L.Lock()
-		for len(t.todo) == 0 && ctx.Err() == nil {
-			t.cond.Wait()
+		if t.notifyTodoLen != nil {
+			t.notifyTodoLen <- len(t.todo)
+		}
+		if len(t.todo) == 0 {
+			for len(t.todo) == 0 && ctx.Err() == nil {
+				t.cond.Wait()
+			}
+			if t.notifyTodoLen != nil {
+				t.notifyTodoLen <- len(t.todo)
+			}
 		}
 		if ctx.Err() != nil {
+			t.cond.L.Unlock()
 			return
 		}
 		item := t.todo[0]
@@ -89,6 +103,12 @@ func (t *trasher) runWorker(ctx context.Context) {
 
 		logger := t.keepstore.logger.WithField("locator", item.Locator)
 
+		li, err := parseLocator(item.Locator)
+		if err != nil {
+			logger.Warn("ignoring trash request for invalid locator")
+			continue
+		}
+
 		reqMtime := time.Unix(0, item.BlockMtime)
 		if time.Since(reqMtime) < t.keepstore.cluster.Collections.BlobSigningTTL.Duration() {
 			logger.Warnf("client asked to delete a %v old block (BlockMtime %d = %v), but my blobSignatureTTL is %v! Skipping.",
@@ -114,16 +134,16 @@ func (t *trasher) runWorker(ctx context.Context) {
 
 		for _, mnt := range mnts {
 			logger := logger.WithField("mount", mnt.UUID)
-			mtime, err := mnt.Mtime(item.Locator)
+			mtime, err := mnt.Mtime(li.hash)
 			if err != nil {
 				logger.WithError(err).Error("error getting stored mtime")
 				continue
 			}
-			if item.BlockMtime != mtime.UnixNano() {
-				logger.Infof("stored mtime %v does not match trash list value %v; skipping", mtime.UnixNano(), item.BlockMtime)
+			if !mtime.Equal(reqMtime) {
+				logger.Infof("stored mtime (%v) does not match trash list mtime (%v); skipping", mtime, reqMtime)
 				continue
 			}
-			err = mnt.BlockTrash(item.Locator)
+			err = mnt.BlockTrash(li.hash)
 			if err != nil {
 				logger.WithError(err).Info("error trashing block")
 				continue
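
Structurally, newTrasher now determines the set of trash-capable mounts once and passes it to each worker goroutine, and the workers share a single todo slice guarded by a sync.Cond; the new notifyTodoLen channel is only a test hook that reports the queue length as items are consumed. A reduced sketch of that queue pattern follows; the names are simplified and this is not the trasher itself:

    package main

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    type queue struct {
        cond *sync.Cond
        todo []string
    }

    // set replaces the whole work list, like SetTrashList above.
    func (q *queue) set(items []string) {
        q.cond.L.Lock()
        q.todo = items
        q.cond.L.Unlock()
        q.cond.Broadcast()
    }

    func (q *queue) worker(ctx context.Context) {
        go func() {
            <-ctx.Done()
            q.cond.Broadcast() // wake the worker so it notices cancellation
        }()
        for {
            q.cond.L.Lock()
            for len(q.todo) == 0 && ctx.Err() == nil {
                q.cond.Wait()
            }
            if ctx.Err() != nil {
                q.cond.L.Unlock()
                return
            }
            item := q.todo[0]
            q.todo = q.todo[1:]
            q.cond.L.Unlock()
            fmt.Println("processing", item)
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        q := &queue{cond: sync.NewCond(new(sync.Mutex))}
        go q.worker(ctx)
        q.set([]string{"a", "b", "c"})
        time.Sleep(100 * time.Millisecond)
        cancel()
    }
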
diff --git a/services/keepstore/trash_worker_test.go b/services/keepstore/trash_worker_test.go
index e3226aaea7..a0917eb0d2 100644
--- a/services/keepstore/trash_worker_test.go
+++ b/services/keepstore/trash_worker_test.go
@@ -5,41 +5,202 @@
 package keepstore
 
 import (
-	check "gopkg.in/check.v1"
+	"context"
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sort"
+	"time"
+
+	"git.arvados.org/arvados.git/sdk/go/arvados"
+	. "gopkg.in/check.v1"
 )
 
-type TrashWorkerTestData struct {
-	Locator1    string
-	Block1      []byte
-	BlockMtime1 int64
+func (s *routerSuite) TestTrashList_Clear(c *C) {
+	s.cluster.Collections.BlobTrash = false
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
 
-	Locator2    string
-	Block2      []byte
-	BlockMtime2 int64
+	resp := call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, []byte(`
+		[
+		 {
+		  "locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
+		  "block_mtime":1707249451308502672,
+		  "mount_uuid":"zzzzz-nyw5e-000000000000000"
+		 }
+		]
+		`), nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(router.trasher.todo, DeepEquals, []TrashListItem{{
+		Locator:    "acbd18db4cc2f85cedef654fccc4a4d8+3",
+		BlockMtime: 1707249451308502672,
+		MountUUID:  "zzzzz-nyw5e-000000000000000",
+	}})
 
-	CreateData      bool
-	CreateInVolume1 bool
+	resp = call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, []byte("[]"), nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(router.trasher.todo, HasLen, 0)
+}
 
-	UseTrashLifeTime bool
-	DifferentMtimes  bool
+func (s *routerSuite) TestTrashList_Execute(c *C) {
+	s.cluster.Collections.BlobTrashConcurrency = 1
+	s.cluster.Volumes = map[string]arvados.Volume{
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
+		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "stub", ReadOnly: true},
+		"zzzzz-nyw5e-333333333333333": {Replication: 1, Driver: "stub", ReadOnly: true, AllowTrashWhenReadOnly: true},
+	}
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
 
-	DeleteLocator    string
-	SpecifyMountUUID bool
+	var mounts []struct {
+		UUID     string
+		DeviceID string `json:"device_id"`
+	}
+	resp := call(router, "GET", "http://example/mounts", s.cluster.SystemRootToken, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	err := json.Unmarshal(resp.Body.Bytes(), &mounts)
+	c.Assert(err, IsNil)
+	c.Assert(mounts, HasLen, 4)
 
-	ExpectLocator1 bool
-	ExpectLocator2 bool
-}
+	// Sort mounts by UUID
+	sort.Slice(mounts, func(i, j int) bool {
+		return mounts[i].UUID < mounts[j].UUID
+	})
+	c.Check(mounts[0].UUID, Equals, "zzzzz-nyw5e-000000000000000")
+	c.Check(mounts[1].UUID, Equals, "zzzzz-nyw5e-111111111111111")
+	c.Check(mounts[2].UUID, Equals, "zzzzz-nyw5e-222222222222222")
 
-func (s *routerSuite) TestTrashList_Clear(c *check.C) {
-	c.Fatal("todo: update trash list")
-	c.Fatal("todo: clear trash list")
-}
+	// Make vols (stub volumes) in same order as mounts
+	var vols []*stubVolume
+	for _, mount := range mounts {
+		vols = append(vols, router.keepstore.mounts[mount.UUID].volume.(*stubVolume))
+	}
+
+	var trashList []TrashListItem
+
+	tNew := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() / 2)
+	tOld := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Second)
+
+	var checks []func()
+
+	for _, trial := range []struct {
+		comment        string
+		storeMtime     []time.Time
+		trashListItems []TrashListItem
+		expectData     []bool
+	}{
+		{
+			comment:    "timestamp matches, but is not old enough to trash => skip",
+			storeMtime: []time.Time{tNew},
+			trashListItems: []TrashListItem{
+				{
+					BlockMtime: tNew.UnixNano(),
+					MountUUID:  mounts[0].UUID,
+				},
+			},
+			expectData: []bool{true},
+		},
+		{
+			comment:    "timestamp matches, and is old enough => trash",
+			storeMtime: []time.Time{tOld},
+			trashListItems: []TrashListItem{
+				{
+					BlockMtime: tOld.UnixNano(),
+					MountUUID:  mounts[0].UUID,
+				},
+			},
+			expectData: []bool{false},
+		},
+		{
+			comment:    "timestamp matches and is old enough on mount 0, but the request specifies mount 1, where timestamp does not match => skip",
+			storeMtime: []time.Time{tOld, tOld.Add(-time.Second)},
+			trashListItems: []TrashListItem{
+				{
+					BlockMtime: tOld.UnixNano(),
+					MountUUID:  mounts[1].UUID,
+				},
+			},
+			expectData: []bool{true, true},
+		},
+		{
+			comment:    "MountUUID unspecified => trash from any mount where timestamp matches, leave alone elsewhere",
+			storeMtime: []time.Time{tOld, tOld.Add(-time.Second)},
+			trashListItems: []TrashListItem{
+				{
+					BlockMtime: tOld.UnixNano(),
+				},
+			},
+			expectData: []bool{false, true},
+		},
+		{
+			comment:    "MountUUID unspecified => trash from multiple mounts if timestamp matches, but skip readonly volumes unless AllowTrashWhenReadOnly",
+			storeMtime: []time.Time{tOld, tOld, tOld, tOld},
+			trashListItems: []TrashListItem{
+				{
+					BlockMtime: tOld.UnixNano(),
+				},
+			},
+			expectData: []bool{false, false, true, false},
+		},
+		{
+			comment:    "readonly MountUUID specified => skip",
+			storeMtime: []time.Time{tOld, tOld, tOld},
+			trashListItems: []TrashListItem{
+				{
+					BlockMtime: tOld.UnixNano(),
+					MountUUID:  mounts[2].UUID,
+				},
+			},
+			expectData: []bool{true, true, true},
+		},
+	} {
+		trial := trial
+		data := []byte(fmt.Sprintf("trial %+v", trial))
+		hash := fmt.Sprintf("%x", md5.Sum(data))
+		for i, t := range trial.storeMtime {
+			if t.IsZero() {
+				continue
+			}
+			err := vols[i].BlockWrite(context.Background(), hash, data)
+			c.Assert(err, IsNil)
+			err = vols[i].blockTouchWithTime(hash, t)
+			c.Assert(err, IsNil)
+		}
+		for _, item := range trial.trashListItems {
+			item.Locator = fmt.Sprintf("%s+%d", hash, len(data))
+			trashList = append(trashList, item)
+		}
+		for i, expect := range trial.expectData {
+			i, expect := i, expect
+			checks = append(checks, func() {
+				ent := vols[i].data[hash]
+				dataPresent := ent.data != nil && ent.trash.IsZero()
+				c.Check(dataPresent, Equals, expect, Commentf("%s mount %d expect %v got len(ent.data)=%d ent.trash=%v // %s", hash, i, expect, len(ent.data), ent.trash, trial.comment))
+			})
+		}
+	}
+
+	router.trasher.notifyTodoLen = make(chan int)
+
+	listjson, err := json.Marshal(trashList)
+	resp = call(router, "PUT", "http://example/trash", s.cluster.SystemRootToken, listjson, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+
+	for lenwas := -1; lenwas != 0; {
+		select {
+		case lenis := <-router.trasher.notifyTodoLen:
+			if lenis > 0 || lenwas > 0 {
+				lenwas = lenis
+			}
+		case <-time.After(time.Second):
+			c.Fatal("timed out")
+		}
+	}
 
-func (s *routerSuite) TestTrashList(c *check.C) {
-	c.Fatal("todo: trash nonexistent block")
-	c.Fatal("todo: trash existing block")
-	c.Fatal("todo: trash block on specified volume")
-	c.Fatal("todo: trash block on volume with AllowTrash=false")
-	c.Fatal("todo: trash block with unexpected timestamp")
-	c.Fatal("todo: trash block with recent timestamp")
+	c.Logf("doing checks")
+	for _, check := range checks {
+		check()
+	}
 }
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index 08b66d6dc5..98edfae14d 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -28,11 +28,12 @@ import (
 )
 
 func init() {
-	driver["Directory"] = newDirectoryVolume
+	driver["Directory"] = newUnixVolume
 }
 
-func newDirectoryVolume(params newVolumeParams) (volume, error) {
+func newUnixVolume(params newVolumeParams) (volume, error) {
 	v := &UnixVolume{
+		uuid:    params.UUID,
 		cluster: params.Cluster,
 		volume:  params.ConfigVolume,
 		logger:  params.Logger,
@@ -70,6 +71,7 @@ type UnixVolume struct {
 	Root      string // path to the volume's root directory
 	Serialize bool
 
+	uuid    string
 	cluster *arvados.Cluster
 	volume  arvados.Volume
 	logger  logrus.FieldLogger
@@ -89,8 +91,9 @@ type UnixVolume struct {
 // "fa0b6166-3b55-4994-bd3f-92f4e00a1bb0/keep".
 func (v *UnixVolume) DeviceID() string {
 	giveup := func(f string, args ...interface{}) string {
-		v.logger.Infof(f+"; using blank DeviceID for volume %s", append(args, v)...)
-		return ""
+		v.logger.Infof(f+"; using hostname:path for volume %s", append(args, v.uuid)...)
+		host, _ := os.Hostname()
+		return host + ":" + v.Root
 	}
 	buf, err := exec.Command("findmnt", "--noheadings", "--target", v.Root).CombinedOutput()
 	if err != nil {
@@ -161,9 +164,6 @@ func (v *UnixVolume) DeviceID() string {
 
 // BlockTouch sets the timestamp for the given locator to the current time
 func (v *UnixVolume) BlockTouch(hash string) error {
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
 	p := v.blockPath(hash)
 	f, err := v.os.OpenFile(p, os.O_RDWR|os.O_APPEND, 0644)
 	if err != nil {
@@ -208,7 +208,7 @@ func (v *UnixVolume) getFunc(ctx context.Context, path string, fn func(io.Reader
 		return err
 	}
 	defer f.Close()
-	return fn(NewCountingReader(ioutil.NopCloser(f), v.os.stats.TickInBytes))
+	return fn(newCountingReader(ioutil.NopCloser(f), v.os.stats.TickInBytes))
 }
 
 // stat is os.Stat() with some extra sanity checks.
@@ -245,10 +245,7 @@ func (v *UnixVolume) BlockRead(ctx context.Context, hash string, w io.Writer) (i
 // BlockWrite stores a block on the volume. If it already exists, its
 // timestamp is updated.
 func (v *UnixVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
-	if v.IsFull() {
+	if v.isFull() {
 		return errFull
 	}
 	bdir := v.blockDir(hash)
@@ -385,9 +382,6 @@ func (v *UnixVolume) BlockTrash(loc string) error {
 	// be re-written), or (b) Touch() will update the file's timestamp and
 	// Trash() will read the correct up-to-date timestamp and choose not to
 	// trash the file.
-	if v.volume.ReadOnly && !v.volume.AllowTrashWhenReadOnly {
-		return errMethodNotAllowed
-	}
 	if err := v.lock(context.TODO()); err != nil {
 		return err
 	}
@@ -424,10 +418,6 @@ func (v *UnixVolume) BlockTrash(loc string) error {
 // Look for path/{loc}.trash.{deadline} in storage,
 // and rename the first such file as path/{loc}
 func (v *UnixVolume) BlockUntrash(hash string) error {
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
-
 	v.os.stats.TickOps("readdir")
 	v.os.stats.Tick(&v.os.stats.ReaddirOps)
 	files, err := ioutil.ReadDir(v.blockDir(hash))
@@ -470,9 +460,9 @@ func (v *UnixVolume) blockPath(loc string) string {
 	return filepath.Join(v.blockDir(loc), loc)
 }
 
-// IsFull returns true if the free space on the volume is less than
+// isFull returns true if the free space on the volume is less than
 // MinFreeKilobytes.
-func (v *UnixVolume) IsFull() (isFull bool) {
+func (v *UnixVolume) isFull() (isFull bool) {
 	fullSymlink := v.Root + "/full"
 
 	// Check if the volume has been marked as full in the last hour.
@@ -508,7 +498,7 @@ func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
 	if err == nil {
 		// Statfs output is not guaranteed to measure free
 		// space in terms of 1K blocks.
-		free = fs.Bavail * uint64(fs.Bsize) / 1024
+		free = fs.Bavail * uint64(fs.Bsize)
 	}
 	return
 }
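
Two behavioral changes in the UnixVolume hunks above are easy to miss: DeviceID now falls back to hostname:path instead of an empty string when findmnt-based detection fails, and FreeDiskSpace now reports bytes (Bavail times Bsize) instead of 1K blocks. The "full" marker that TestIsFull exercises below is a symlink whose target is a Unix timestamp, treated as stale after roughly an hour. A simplified sketch of just that marker convention (the real isFull also consults MinFreeKilobytes and the actual free space):

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "time"
    )

    // markFull records "this volume was full at time.Now()" as a symlink
    // whose target is the timestamp, matching the convention in the tests.
    func markFull(root string) error {
        return os.Symlink(fmt.Sprintf("%d", time.Now().Unix()), root+"/full")
    }

    // isFull reports whether a marker exists and is less than an hour old.
    func isFull(root string) bool {
        target, err := os.Readlink(root + "/full")
        if err != nil {
            return false // no marker
        }
        when, err := strconv.ParseInt(target, 10, 64)
        if err != nil {
            return false // unparseable marker
        }
        return time.Since(time.Unix(when, 0)) < time.Hour
    }

    func main() {
        dir, _ := os.MkdirTemp("", "fullmarker")
        defer os.RemoveAll(dir)
        fmt.Println(isFull(dir)) // false
        markFull(dir)
        fmt.Println(isFull(dir)) // true
    }
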
diff --git a/services/keepstore/unix_volume_test.go b/services/keepstore/unix_volume_test.go
index 555a4bc6dd..2fe31d0bdf 100644
--- a/services/keepstore/unix_volume_test.go
+++ b/services/keepstore/unix_volume_test.go
@@ -22,52 +22,40 @@ import (
 	check "gopkg.in/check.v1"
 )
 
-type TestableUnixVolume struct {
+type testableUnixVolume struct {
 	UnixVolume
 	t TB
 }
 
-// PutRaw writes a Keep block directly into a UnixVolume, even if
-// the volume is readonly.
-func (v *TestableUnixVolume) PutRaw(locator string, data []byte) {
-	defer func(orig bool) {
-		v.volume.ReadOnly = orig
-	}(v.volume.ReadOnly)
-	v.volume.ReadOnly = false
-	err := v.BlockWrite(context.Background(), locator, data)
+func (v *testableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
+	err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{Actime: lastPut.Unix(), Modtime: lastPut.Unix()})
 	if err != nil {
 		v.t.Fatal(err)
 	}
 }
 
-func (v *TestableUnixVolume) TouchWithDate(locator string, lastPut time.Time) {
-	err := syscall.Utime(v.blockPath(locator), &syscall.Utimbuf{lastPut.Unix(), lastPut.Unix()})
-	if err != nil {
-		v.t.Fatal(err)
-	}
-}
-
-func (v *TestableUnixVolume) Teardown() {
+func (v *testableUnixVolume) Teardown() {
 	if err := os.RemoveAll(v.Root); err != nil {
 		v.t.Error(err)
 	}
 }
 
-func (v *TestableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
+func (v *testableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
 	return "open", "create"
 }
 
-var _ = check.Suite(&UnixVolumeSuite{})
+var _ = check.Suite(&unixVolumeSuite{})
 
-type UnixVolumeSuite struct {
+type unixVolumeSuite struct {
 	params  newVolumeParams
-	volumes []*TestableUnixVolume
+	volumes []*testableUnixVolume
 }
 
-func (s *UnixVolumeSuite) SetUpTest(c *check.C) {
+func (s *unixVolumeSuite) SetUpTest(c *check.C) {
 	logger := ctxlog.TestLogger(c)
 	reg := prometheus.NewRegistry()
 	s.params = newVolumeParams{
+		UUID:        "zzzzz-nyw5e-999999999999999",
 		Cluster:     testCluster(c),
 		Logger:      logger,
 		MetricsVecs: newVolumeMetricsVecs(reg),
@@ -75,23 +63,24 @@ func (s *UnixVolumeSuite) SetUpTest(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TearDownTest(c *check.C) {
+func (s *unixVolumeSuite) TearDownTest(c *check.C) {
 	for _, v := range s.volumes {
 		v.Teardown()
 	}
 }
 
-func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, params newVolumeParams, serialize bool) *TestableUnixVolume {
+func (s *unixVolumeSuite) newTestableUnixVolume(c *check.C, params newVolumeParams, serialize bool) *testableUnixVolume {
 	d, err := ioutil.TempDir("", "volume_test")
 	c.Check(err, check.IsNil)
 	var locker sync.Locker
 	if serialize {
 		locker = &sync.Mutex{}
 	}
-	v := &TestableUnixVolume{
+	v := &testableUnixVolume{
 		UnixVolume: UnixVolume{
 			Root:    d,
 			locker:  locker,
+			uuid:    params.UUID,
 			cluster: params.Cluster,
 			logger:  params.Logger,
 			volume:  params.ConfigVolume,
@@ -104,36 +93,32 @@ func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, params newVolumePara
 	return v
 }
 
-// serialize = false; readonly = false
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTests(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests(c *check.C) {
 	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
 		return s.newTestableUnixVolume(c, params, false)
 	})
 }
 
-// serialize = false; readonly = true
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsReadOnly(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_ReadOnly(c *check.C) {
 	DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
-		return s.newTestableUnixVolume(c, params, true)
+		return s.newTestableUnixVolume(c, params, false)
 	})
 }
 
-// serialize = true; readonly = false
-func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsSerialized(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_Serialized(c *check.C) {
 	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
-		return s.newTestableUnixVolume(c, params, false)
+		return s.newTestableUnixVolume(c, params, true)
 	})
 }
 
-// serialize = true; readonly = true
-func (s *UnixVolumeSuite) TestUnixVolumeHandlersWithGenericVolumeTests(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeWithGenericTests_Readonly_Serialized(c *check.C) {
 	DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
 		return s.newTestableUnixVolume(c, params, true)
 	})
 }
 
-func (s *UnixVolumeSuite) TestGetNotFound(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.params, false)
+func (s *unixVolumeSuite) TestGetNotFound(c *check.C) {
+	v := s.newTestableUnixVolume(c, s.params, true)
 	defer v.Teardown()
 	v.BlockWrite(context.Background(), TestHash, TestBlock)
 
@@ -149,7 +134,7 @@ func (s *UnixVolumeSuite) TestGetNotFound(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestPut(c *check.C) {
+func (s *unixVolumeSuite) TestPut(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
@@ -166,7 +151,7 @@ func (s *UnixVolumeSuite) TestPut(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
+func (s *unixVolumeSuite) TestPutBadVolume(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
@@ -176,27 +161,27 @@ func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
 	c.Check(err, check.IsNil)
 }
 
-func (s *UnixVolumeSuite) TestIsFull(c *check.C) {
+func (s *unixVolumeSuite) TestIsFull(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
 	fullPath := v.Root + "/full"
 	now := fmt.Sprintf("%d", time.Now().Unix())
 	os.Symlink(now, fullPath)
-	if !v.IsFull() {
-		c.Errorf("%s: claims not to be full", v)
+	if !v.isFull() {
+		c.Error("volume claims not to be full")
 	}
 	os.Remove(fullPath)
 
 	// Test with an expired /full link.
 	expired := fmt.Sprintf("%d", time.Now().Unix()-3605)
 	os.Symlink(expired, fullPath)
-	if v.IsFull() {
-		c.Errorf("%s: should no longer be full", v)
+	if v.isFull() {
+		c.Error("volume should no longer be full")
 	}
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
@@ -210,7 +195,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncFileError(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeGetFuncFileError(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
@@ -227,7 +212,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncFileError(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
@@ -262,16 +247,15 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
+func (s *unixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.params, true)
 	defer v.Teardown()
 	v.locker.Lock()
+	defer v.locker.Unlock()
 	ctx, cancel := context.WithCancel(context.Background())
 	go func() {
 		time.Sleep(50 * time.Millisecond)
 		cancel()
-		time.Sleep(50 * time.Millisecond)
-		v.locker.Unlock()
 	}()
 	err := v.BlockWrite(ctx, TestHash, TestBlock)
 	if err != context.Canceled {
@@ -279,29 +263,27 @@ func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockRead(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.params, false)
+func (s *unixVolumeSuite) TestUnixVolumeContextCancelBlockRead(c *check.C) {
+	v := s.newTestableUnixVolume(c, s.params, true)
 	defer v.Teardown()
-	bpath := v.blockPath(TestHash)
-	v.PutRaw(TestHash, TestBlock)
-	os.Remove(bpath)
-	err := syscall.Mkfifo(bpath, 0600)
+	err := v.BlockWrite(context.Background(), TestHash, TestBlock)
 	if err != nil {
-		c.Fatalf("Mkfifo %s: %s", bpath, err)
+		c.Fatal(err)
 	}
-	defer os.Remove(bpath)
 	ctx, cancel := context.WithCancel(context.Background())
+	v.locker.Lock()
+	defer v.locker.Unlock()
 	go func() {
 		time.Sleep(50 * time.Millisecond)
 		cancel()
 	}()
 	n, err := v.BlockRead(ctx, TestHash, io.Discard)
-	if n == len(TestBlock) || err != context.Canceled {
+	if n > 0 || err != context.Canceled {
 		c.Errorf("BlockRead() returned %d, %s -- expected short read / canceled", n, err)
 	}
 }
 
-func (s *UnixVolumeSuite) TestStats(c *check.C) {
+func (s *unixVolumeSuite) TestStats(c *check.C) {
 	vol := s.newTestableUnixVolume(c, s.params, false)
 	stats := func() string {
 		buf, err := json.Marshal(vol.InternalStats())
@@ -346,7 +328,7 @@ func (s *UnixVolumeSuite) TestStats(c *check.C) {
 	c.Check(stats(), check.Matches, `.*"FlockOps":2,.*`)
 }
 
-func (s *UnixVolumeSuite) TestSkipUnusedDirs(c *check.C) {
+func (s *unixVolumeSuite) TestSkipUnusedDirs(c *check.C) {
 	vol := s.newTestableUnixVolume(c, s.params, false)
 
 	err := os.Mkdir(vol.UnixVolume.Root+"/aaa", 0777)
diff --git a/services/keepstore/volume_generic_test.go b/services/keepstore/volume_generic_test.go
index 00ef12f4e1..22667743dd 100644
--- a/services/keepstore/volume_generic_test.go
+++ b/services/keepstore/volume_generic_test.go
@@ -15,6 +15,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"git.arvados.org/arvados.git/sdk/go/arvados"
@@ -77,10 +78,6 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
 
 	s.testMetrics(t, readonly, factory)
 
-	if readonly {
-		s.testUpdateReadOnly(t, factory)
-	}
-
 	s.testGetConcurrent(t, factory)
 	if !readonly {
 		s.testPutConcurrent(t, factory)
@@ -109,6 +106,7 @@ func (s *genericVolumeSuite) setup(t TB) {
 
 func (s *genericVolumeSuite) newVolume(t TB, factory TestableVolumeFactory) TestableVolume {
 	return factory(t, newVolumeParams{
+		UUID:         "zzzzz-nyw5e-999999999999999",
 		Cluster:      s.cluster,
 		ConfigVolume: s.volume,
 		Logger:       s.logger,
@@ -124,14 +122,17 @@ func (s *genericVolumeSuite) testGet(t TB, factory TestableVolumeFactory) {
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	v.BlockWrite(context.Background(), fooHash, []byte("foo"))
+	err := v.BlockWrite(context.Background(), TestHash, TestBlock)
+	if err != nil {
+		t.Error(err)
+	}
 
 	buf := bytes.NewBuffer(nil)
-	_, err := v.BlockRead(context.Background(), TestHash, buf)
+	_, err = v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
-	if buf.String() != "foo" {
+	if bytes.Compare(buf.Bytes(), TestBlock) != 0 {
 		t.Errorf("expected %s, got %s", "foo", buf.String())
 	}
 }
@@ -433,18 +434,18 @@ func (s *genericVolumeSuite) testDeleteOldBlock(t TB, factory TestableVolumeFact
 
 	_, err := v.Mtime(TestHash)
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
 	indexBuf := new(bytes.Buffer)
 	v.Index(context.Background(), "", indexBuf)
 	if strings.Contains(string(indexBuf.Bytes()), TestHash) {
-		t.Fatalf("Found trashed block in Index")
+		t.Errorf("Found trashed block in Index")
 	}
 
 	err = v.BlockTouch(TestHash)
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 }
 
@@ -518,7 +519,7 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
 
 	_, err = v.BlockRead(context.Background(), TestHash, io.Discard)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// Check that the operations counter increased
@@ -532,50 +533,6 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
 	}
 }
 
-// Putting, updating, touching, and deleting blocks from a read-only volume result in error.
-// Test is intended for only read-only volumes
-func (s *genericVolumeSuite) testUpdateReadOnly(t TB, factory TestableVolumeFactory) {
-	s.setup(t)
-	v := s.newVolume(t, factory)
-	defer v.Teardown()
-
-	v.BlockWrite(context.Background(), TestHash, TestBlock)
-
-	// Get from read-only volume should succeed
-	_, err := v.BlockRead(context.Background(), TestHash, io.Discard)
-	if err != nil {
-		t.Errorf("got err %v, expected nil", err)
-	}
-
-	// Put a new block to read-only volume should result in error
-	err = v.BlockWrite(context.Background(), TestHash2, TestBlock2)
-	if err == nil {
-		t.Errorf("Expected error when putting block in a read-only volume")
-	}
-	_, err = v.BlockRead(context.Background(), TestHash2, io.Discard)
-	if err == nil {
-		t.Errorf("Expected error when getting block whose put in read-only volume failed")
-	}
-
-	// Touch a block in read-only volume should result in error
-	err = v.BlockTouch(TestHash)
-	if err == nil {
-		t.Errorf("Expected error when touching block in a read-only volume")
-	}
-
-	// Delete a block from a read-only volume should result in error
-	err = v.BlockTrash(TestHash)
-	if err == nil {
-		t.Errorf("Expected error when deleting block from a read-only volume")
-	}
-
-	// Overwriting an existing block in read-only volume should result in error
-	err = v.BlockWrite(context.Background(), TestHash, TestBlock)
-	if err == nil {
-		t.Errorf("Expected error when putting block in a read-only volume")
-	}
-}
-
 // Launch concurrent Gets
 // Test should pass for both writable and read-only volumes
 func (s *genericVolumeSuite) testGetConcurrent(t TB, factory TestableVolumeFactory) {
@@ -637,59 +594,38 @@ func (s *genericVolumeSuite) testPutConcurrent(t TB, factory TestableVolumeFacto
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	sem := make(chan int)
-	go func(sem chan int) {
-		err := v.BlockWrite(context.Background(), TestHash, TestBlock)
-		if err != nil {
-			t.Errorf("err1: %v", err)
-		}
-		sem <- 1
-	}(sem)
-
-	go func(sem chan int) {
-		err := v.BlockWrite(context.Background(), TestHash2, TestBlock2)
-		if err != nil {
-			t.Errorf("err2: %v", err)
-		}
-		sem <- 1
-	}(sem)
-
-	go func(sem chan int) {
-		err := v.BlockWrite(context.Background(), TestHash3, TestBlock3)
+	blks := []struct {
+		hash string
+		data []byte
+	}{
+		{hash: TestHash, data: TestBlock},
+		{hash: TestHash2, data: TestBlock2},
+		{hash: TestHash3, data: TestBlock3},
+	}
+
+	var wg sync.WaitGroup
+	for _, blk := range blks {
+		blk := blk
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			err := v.BlockWrite(context.Background(), blk.hash, blk.data)
+			if err != nil {
+				t.Errorf("%s: %v", blk.hash, err)
+			}
+		}()
+	}
+	wg.Wait()
+
+	// Check that we actually wrote the blocks.
+	for _, blk := range blks {
+		buf := bytes.NewBuffer(nil)
+		_, err := v.BlockRead(context.Background(), blk.hash, buf)
 		if err != nil {
-			t.Errorf("err3: %v", err)
+			t.Errorf("get %s: %v", blk.hash, err)
+		} else if buf.String() != string(blk.data) {
+			t.Errorf("get %s: expected %s, got %s", blk.hash, blk.data, buf)
 		}
-		sem <- 1
-	}(sem)
-
-	// Wait for all goroutines to finish
-	for done := 0; done < 3; done++ {
-		<-sem
-	}
-
-	// Double check that we actually wrote the blocks we expected to write.
-	buf := bytes.NewBuffer(nil)
-	_, err := v.BlockRead(context.Background(), TestHash, buf)
-	if err != nil {
-		t.Errorf("Get #1: %v", err)
-	} else if buf.String() != string(TestBlock) {
-		t.Errorf("Get #1: expected %s, got %s", TestBlock, buf)
-	}
-
-	buf.Reset()
-	_, err = v.BlockRead(context.Background(), TestHash2, buf)
-	if err != nil {
-		t.Errorf("Get #2: %v", err)
-	} else if buf.String() != string(TestBlock2) {
-		t.Errorf("Get #2: expected %s, got %s", TestBlock2, buf)
-	}
-
-	buf.Reset()
-	_, err = v.BlockRead(context.Background(), TestHash3, buf)
-	if err != nil {
-		t.Errorf("Get #3: %v", err)
-	} else if buf.String() != string(TestBlock3) {
-		t.Errorf("Get #3: expected %s, got %s", TestBlock3, buf)
 	}
 }
 
@@ -705,7 +641,7 @@ func (s *genericVolumeSuite) testPutFullBlock(t TB, factory TestableVolumeFactor
 	hash := fmt.Sprintf("%x", md5.Sum(wdata))
 	err := v.BlockWrite(context.Background(), hash, wdata)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	buf := bytes.NewBuffer(nil)
@@ -735,7 +671,7 @@ func (s *genericVolumeSuite) testTrashUntrash(t TB, readonly bool, factory Testa
 	buf := bytes.NewBuffer(nil)
 	_, err := v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	if buf.String() != string(TestBlock) {
 		t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
@@ -747,6 +683,7 @@ func (s *genericVolumeSuite) testTrashUntrash(t TB, readonly bool, factory Testa
 		t.Error(err)
 		return
 	}
+	buf.Reset()
 	_, err = v.BlockRead(context.Background(), TestHash, buf)
 	if err == nil || !os.IsNotExist(err) {
 		t.Errorf("os.IsNotExist(%v) should have been true", err)
@@ -755,13 +692,14 @@ func (s *genericVolumeSuite) testTrashUntrash(t TB, readonly bool, factory Testa
 	// Untrash
 	err = v.BlockUntrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// Get the block - after trash and untrash sequence
+	buf.Reset()
 	_, err = v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	if buf.String() != string(TestBlock) {
 		t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
@@ -780,7 +718,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 			return err
 		}
 		if buf.String() != string(TestBlock) {
-			t.Fatalf("Got data %+q, expected %+q", buf, TestBlock)
+			t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
 		}
 
 		_, err = v.Mtime(TestHash)
@@ -806,23 +744,23 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
 	err := checkGet()
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// Trash the block
 	err = v.BlockTrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	err = checkGet()
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
 	err = v.BlockTouch(TestHash)
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
 	v.EmptyTrash()
@@ -831,17 +769,17 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	// because the deadline hasn't been reached.
 	err = v.BlockUntrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	err = checkGet()
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	err = v.BlockTouch(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// Because we Touch'ed, need to backdate again for next set of tests
@@ -852,14 +790,14 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	// it's also acceptable for Untrash to succeed.
 	err = v.BlockUntrash(TestHash)
 	if err != nil && !os.IsNotExist(err) {
-		t.Fatalf("Expected success or os.IsNotExist(), but got: %v", err)
+		t.Errorf("Expected success or os.IsNotExist(), but got: %v", err)
 	}
 
 	// The additional Untrash should not interfere with our
 	// already-untrashed copy.
 	err = checkGet()
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// Untrash might have updated the timestamp, so backdate again
@@ -871,22 +809,22 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
 	err = v.BlockTrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	err = checkGet()
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
 	// Even though 1ns has passed, we can untrash because we
 	// haven't called EmptyTrash yet.
 	err = v.BlockUntrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	err = checkGet()
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// Trash it again, and this time call EmptyTrash so it really
@@ -896,20 +834,20 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	_ = v.BlockTrash(TestHash)
 	err = checkGet()
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 	v.EmptyTrash()
 
 	// Untrash won't find it
 	err = v.BlockUntrash(TestHash)
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
 	// Get block won't find it
 	err = checkGet()
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
 	// Third set: If the same data block gets written again after
@@ -922,11 +860,11 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	s.cluster.Collections.BlobTrashLifetime.Set("1ns")
 	err = v.BlockTrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	err = checkGet()
 	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
 	v.BlockWrite(context.Background(), TestHash, TestBlock)
@@ -936,7 +874,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	v.EmptyTrash()
 	err = checkGet()
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// Fourth set: If the same data block gets trashed twice with
@@ -950,7 +888,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	s.cluster.Collections.BlobTrashLifetime.Set("1ns")
 	err = v.BlockTrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	v.BlockWrite(context.Background(), TestHash, TestBlock)
@@ -959,7 +897,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	s.cluster.Collections.BlobTrashLifetime.Set("1h")
 	err = v.BlockTrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 
 	// EmptyTrash should not prevent us from recovering the
@@ -967,10 +905,10 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	v.EmptyTrash()
 	err = v.BlockUntrash(TestHash)
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 	err = checkGet()
 	if err != nil {
-		t.Fatal(err)
+		t.Error(err)
 	}
 }

commit 5c7271dd25c43eb086382f9069b84c7b54942d63
Author: Tom Clegg <tom at curii.com>
Date:   Sat Feb 3 14:43:34 2024 -0500

    2960: Update tests, continued.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index 8f83054b2b..5846095c8c 100644
--- a/services/keepstore/azure_blob_volume.go
+++ b/services/keepstore/azure_blob_volume.go
@@ -334,7 +334,7 @@ func (v *AzureBlobVolume) BlockWrite(ctx context.Context, hash string, data []by
 		io.Copy(bufw, bytes.NewReader(data))
 		bufw.Close()
 	}()
-	errChan := make(chan error)
+	errChan := make(chan error, 1)
 	go func() {
 		var body io.Reader = bufr
 		if len(data) == 0 {
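
The only functional change in this hunk is the errChan buffer. With an unbuffered channel, the uploader goroutine could block forever on its send if BlockWrite returned early, for example on context cancellation, and never read the result; a capacity of 1 lets the sender complete its send and exit in every case. A small illustration of the pattern, independent of the Azure SDK:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    func doWork(ctx context.Context) error {
        errChan := make(chan error, 1) // capacity 1: the send below never blocks
        go func() {
            time.Sleep(50 * time.Millisecond) // pretend upload
            errChan <- errors.New("upload finished after caller gave up")
        }()
        select {
        case err := <-errChan:
            return err
        case <-ctx.Done():
            // Return early; the goroutine above can still deposit its
            // result in the buffered channel and terminate cleanly.
            return ctx.Err()
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
        defer cancel()
        fmt.Println(doWork(ctx))
        time.Sleep(100 * time.Millisecond) // give the worker time to exit
    }
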
diff --git a/services/keepstore/azure_blob_volume_test.go b/services/keepstore/azure_blob_volume_test.go
index 7e00db1d93..ab1f84ec05 100644
--- a/services/keepstore/azure_blob_volume_test.go
+++ b/services/keepstore/azure_blob_volume_test.go
@@ -373,7 +373,7 @@ type TestableAzureBlobVolume struct {
 	t         TB
 }
 
-func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs) *TestableAzureBlobVolume {
+func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, params newVolumeParams) *TestableAzureBlobVolume {
 	azHandler := newAzStubHandler(t.(*check.C))
 	azStub := httptest.NewServer(azHandler)
 
@@ -405,10 +405,11 @@ func (s *StubbedAzureBlobSuite) newTestableAzureBlobVolume(t TB, cluster *arvado
 		ListBlobsRetryDelay:  arvados.Duration(time.Millisecond),
 		azClient:             azClient,
 		container:            &azureContainer{ctr: bs.GetContainerReference(container)},
-		cluster:              cluster,
-		volume:               volume,
+		cluster:              params.Cluster,
+		volume:               params.ConfigVolume,
 		logger:               ctxlog.TestLogger(t),
-		metrics:              metrics,
+		metrics:              params.MetricsVecs,
+		bufferPool:           params.BufferPool,
 	}
 	if err = v.check(); err != nil {
 		t.Fatal(err)
@@ -440,16 +441,16 @@ func (s *StubbedAzureBlobSuite) TearDownTest(c *check.C) {
 }
 
 func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeWithGeneric(c *check.C) {
-	DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		return s.newTestableAzureBlobVolume(t, cluster, volume, metrics)
+	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+		return s.newTestableAzureBlobVolume(t, params)
 	})
 }
 
 func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C) {
 	// Test (BlockSize mod azureMaxGetBytes)==0 and !=0 cases
 	for _, b := range []int{2 << 22, 2<<22 - 1} {
-		DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-			v := s.newTestableAzureBlobVolume(t, cluster, volume, metrics)
+		DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+			v := s.newTestableAzureBlobVolume(t, params)
 			v.MaxGetBytes = b
 			return v
 		})
@@ -457,13 +458,18 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeConcurrentRanges(c *check.C)
 }
 
 func (s *StubbedAzureBlobSuite) TestReadonlyAzureBlobVolumeWithGeneric(c *check.C) {
-	DoGenericVolumeTests(c, false, func(c TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		return s.newTestableAzureBlobVolume(c, cluster, volume, metrics)
+	DoGenericVolumeTests(c, false, func(c TB, params newVolumeParams) TestableVolume {
+		return s.newTestableAzureBlobVolume(c, params)
 	})
 }
 
 func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
-	v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+		Cluster:      testCluster(c),
+		ConfigVolume: arvados.Volume{Replication: 3},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	})
 	defer v.Teardown()
 
 	for _, size := range []int{
@@ -499,7 +505,12 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
 }
 
 func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
-	v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+		Cluster:      testCluster(c),
+		ConfigVolume: arvados.Volume{Replication: 3},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	})
 	defer v.Teardown()
 
 	var wg sync.WaitGroup
@@ -535,7 +546,12 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
 }
 
 func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *check.C) {
-	v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+		Cluster:      testCluster(c),
+		ConfigVolume: arvados.Volume{Replication: 3},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	})
 	v.AzureBlobVolume.WriteRaceInterval.Set("2s")
 	v.AzureBlobVolume.WriteRacePollTime.Set("5ms")
 	defer v.Teardown()
@@ -591,7 +607,12 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockWrite(c *ch
 }
 
 func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, testFunc func(context.Context, *TestableAzureBlobVolume) error) {
-	v := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+	v := s.newTestableAzureBlobVolume(c, newVolumeParams{
+		Cluster:      testCluster(c),
+		ConfigVolume: arvados.Volume{Replication: 3},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	})
 	defer v.Teardown()
 	v.azHandler.race = make(chan chan struct{})
 
@@ -627,7 +648,12 @@ func (s *StubbedAzureBlobSuite) testAzureBlobVolumeContextCancel(c *check.C, tes
 }
 
 func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
-	volume := s.newTestableAzureBlobVolume(c, testCluster(c), arvados.Volume{Replication: 3}, newVolumeMetricsVecs(prometheus.NewRegistry()))
+	volume := s.newTestableAzureBlobVolume(c, newVolumeParams{
+		Cluster:      testCluster(c),
+		ConfigVolume: arvados.Volume{Replication: 3},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	})
 	defer volume.Teardown()
 
 	stats := func() string {
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 3b33f93bd2..4ff690a42b 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -150,7 +150,7 @@ func (ks *keepstore) setupMounts(metrics *volumeMetricsVecs) error {
 			},
 		}
 		ks.mounts[uuid] = mnt
-		ks.logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol, mnt.AllowWrite, mnt.AllowTrash)
+		ks.logger.Printf("started volume %s (%s), AllowWrite=%v, AllowTrash=%v", uuid, vol.DeviceID(), mnt.AllowWrite, mnt.AllowTrash)
 	}
 	if len(ks.mounts) == 0 {
 		return fmt.Errorf("no volumes configured for %s", ks.serviceURL)
@@ -387,13 +387,20 @@ func (ks *keepstore) BlockWrite(ctx context.Context, opts arvados.BlockWriteOpti
 	cond := sync.NewCond(new(sync.Mutex))
 	cond.L.Lock()
 	var wg sync.WaitGroup
+nextmnt:
 	for _, mnt := range ks.rendezvous(hash, ks.mountsW) {
-		for result.Want(mnt) && !pending.Want(mnt) && ctx.Err() == nil {
+		for {
+			if result.Done() || ctx.Err() != nil {
+				break nextmnt
+			}
+			if !result.Want(mnt) {
+				continue nextmnt
+			}
+			if pending.Want(mnt) {
+				break
+			}
 			cond.Wait()
 		}
-		if !result.Want(mnt) || ctx.Err() != nil {
-			continue
-		}
 		mnt := mnt
 		logger := ks.logger.WithField("mount", mnt.UUID)
 		pending.Add(mnt)
@@ -404,6 +411,7 @@ func (ks *keepstore) BlockWrite(ctx context.Context, opts arvados.BlockWriteOpti
 			err := mnt.BlockWrite(ctx, hash, opts.Data)
 			cond.L.Lock()
 			defer cond.L.Unlock()
+			defer cond.Broadcast()
 			if err != nil {
 				logger.Debug("write failed")
 				pending.Sub(mnt)
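
For reference, the cond-based write fan-out used in the BlockWrite change above, reduced to a standalone sketch (hypothetical names; the Want/Done replica bookkeeping and error accounting are simplified away, so this is an illustration of the locking pattern, not the keepstore implementation):

    package main

    import (
        "fmt"
        "sync"
    )

    // dispatch starts one write per target but keeps at most `limit`
    // writes in flight. A finishing worker broadcasts on the condition
    // variable so the dispatch loop re-checks the in-flight count,
    // mirroring the cond.Wait / deferred cond.Broadcast pairing above.
    func dispatch(targets []string, limit int, write func(string) error) {
        cond := sync.NewCond(new(sync.Mutex))
        var wg sync.WaitGroup
        inflight := 0
        cond.L.Lock()
        for _, tgt := range targets {
            for inflight >= limit {
                cond.Wait() // releases the lock while waiting
            }
            inflight++
            wg.Add(1)
            tgt := tgt
            go func() {
                defer wg.Done()
                err := write(tgt) // slow work happens without the lock
                cond.L.Lock()
                defer cond.L.Unlock()
                defer cond.Broadcast() // wake the dispatch loop
                inflight--
                if err != nil {
                    fmt.Println("write failed:", tgt, err)
                }
            }()
        }
        cond.L.Unlock()
        wg.Wait()
    }

    func main() {
        dispatch([]string{"vol1", "vol2", "vol3", "vol4"}, 2, func(t string) error {
            fmt.Println("writing to", t)
            return nil
        })
    }
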
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
index 81f3f81ef7..f5482f65bf 100644
--- a/services/keepstore/keepstore_test.go
+++ b/services/keepstore/keepstore_test.go
@@ -58,6 +58,7 @@ func testKeepstore(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*k
 		reg = prometheus.NewRegistry()
 	}
 	ctx, cancel := context.WithCancel(context.Background())
+	ctx = ctxlog.Context(ctx, ctxlog.TestLogger(t))
 	ks, err := newKeepstore(ctx, cluster, cluster.SystemRootToken, reg, testServiceURL)
 	if err != nil {
 		t.Fatal(err)
@@ -140,7 +141,7 @@ func (s *keepstoreSuite) TestBlockReadWrite_SigningDisabled(c *C) {
 				WriteTo: buf,
 			})
 			c.Check(err, IsNil)
-			c.Check(string(buf.Bytes()), Equals, "foo")
+			c.Check(buf.String(), Equals, "foo")
 		}
 	}
 }
@@ -206,7 +207,7 @@ func (s *keepstoreSuite) TestBlockWrite_NoWritableVolumes(c *C) {
 	defer cancel()
 	for _, mnt := range ks.mounts {
 		mnt.volume.(*stubVolume).blockWrite = func(context.Context, string, []byte) error {
-			c.Fatal("volume BlockWrite called")
+			c.Error("volume BlockWrite called")
 			return errors.New("fail")
 		}
 	}
diff --git a/services/keepstore/s3aws_volume_test.go b/services/keepstore/s3aws_volume_test.go
index 60b8534993..9b6b737e52 100644
--- a/services/keepstore/s3aws_volume_test.go
+++ b/services/keepstore/s3aws_volume_test.go
@@ -74,29 +74,34 @@ func (s *StubbedS3AWSSuite) SetUpTest(c *check.C) {
 }
 
 func (s *StubbedS3AWSSuite) TestGeneric(c *check.C) {
-	DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
+	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
 		// Use a negative raceWindow so s3test's 1-second
 		// timestamp precision doesn't confuse fixRace.
-		return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+		return s.newTestableVolume(c, params, -2*time.Second)
 	})
 }
 
 func (s *StubbedS3AWSSuite) TestGenericReadOnly(c *check.C) {
-	DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		return s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+	DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+		return s.newTestableVolume(c, params, -2*time.Second)
 	})
 }
 
 func (s *StubbedS3AWSSuite) TestGenericWithPrefix(c *check.C) {
-	DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		v := s.newTestableVolume(c, cluster, volume, metrics, -2*time.Second)
+	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+		v := s.newTestableVolume(c, params, -2*time.Second)
 		v.PrefixLength = 3
 		return v
 	})
 }
 
 func (s *StubbedS3AWSSuite) TestIndex(c *check.C) {
-	v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 0)
+	v := s.newTestableVolume(c, newVolumeParams{
+		Cluster:      s.cluster,
+		ConfigVolume: arvados.Volume{Replication: 2},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	}, 0)
 	v.IndexPageSize = 3
 	for i := 0; i < 256; i++ {
 		v.PutRaw(fmt.Sprintf("%02x%030x", i, i), []byte{102, 111, 111})
@@ -202,7 +207,12 @@ func (s *StubbedS3AWSSuite) TestIAMRoleCredentials(c *check.C) {
 }
 
 func (s *StubbedS3AWSSuite) TestStats(c *check.C) {
-	v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+	v := s.newTestableVolume(c, newVolumeParams{
+		Cluster:      s.cluster,
+		ConfigVolume: arvados.Volume{Replication: 2},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	}, 5*time.Minute)
 	stats := func() string {
 		buf, err := json.Marshal(v.InternalStats())
 		c.Check(err, check.IsNil)
@@ -268,7 +278,12 @@ func (s *StubbedS3AWSSuite) testContextCancel(c *check.C, testFunc func(context.
 	s.s3server = httptest.NewServer(handler)
 	defer s.s3server.Close()
 
-	v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+	v := s.newTestableVolume(c, newVolumeParams{
+		Cluster:      s.cluster,
+		ConfigVolume: arvados.Volume{Replication: 2},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	}, 5*time.Minute)
 
 	ctx, cancel := context.WithCancel(context.Background())
 
@@ -308,7 +323,12 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 	s.cluster.Collections.BlobTrashLifetime.Set("1h")
 	s.cluster.Collections.BlobSigningTTL.Set("1h")
 
-	v := s.newTestableVolume(c, s.cluster, arvados.Volume{Replication: 2}, newVolumeMetricsVecs(prometheus.NewRegistry()), 5*time.Minute)
+	v := s.newTestableVolume(c, newVolumeParams{
+		Cluster:      s.cluster,
+		ConfigVolume: arvados.Volume{Replication: 2},
+		MetricsVecs:  newVolumeMetricsVecs(prometheus.NewRegistry()),
+		BufferPool:   newBufferPool(ctxlog.TestLogger(c), 8, prometheus.NewRegistry()),
+	}, 5*time.Minute)
 	var none time.Time
 
 	putS3Obj := func(t time.Time, key string, data []byte) {
@@ -539,7 +559,7 @@ func (l LogrusLog) Print(level gofakes3.LogLevel, v ...interface{}) {
 	}
 }
 
-func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, raceWindow time.Duration) *TestableS3AWSVolume {
+func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, params newVolumeParams, raceWindow time.Duration) *TestableS3AWSVolume {
 
 	clock := &s3AWSFakeClock{}
 	// fake s3
@@ -575,10 +595,11 @@ func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, cluster *arvados.Clust
 				UnsafeDelete:       true,
 				IndexPageSize:      1000,
 			},
-			cluster: cluster,
-			volume:  volume,
-			logger:  ctxlog.TestLogger(c),
-			metrics: metrics,
+			cluster:    params.Cluster,
+			volume:     params.ConfigVolume,
+			logger:     params.Logger,
+			metrics:    params.MetricsVecs,
+			bufferPool: params.BufferPool,
 		},
 		c:           c,
 		server:      srv,
diff --git a/services/keepstore/streamwriterat.go b/services/keepstore/streamwriterat.go
index a5ccf1ffae..365b55f233 100644
--- a/services/keepstore/streamwriterat.go
+++ b/services/keepstore/streamwriterat.go
@@ -37,11 +37,14 @@ type streamWriterAt struct {
 
 // newStreamWriterAt creates a new streamWriterAt.
 func newStreamWriterAt(w io.Writer, partsize int, buf []byte) *streamWriterAt {
+	if partsize == 0 {
+		partsize = 65536
+	}
 	nparts := (len(buf) + partsize - 1) / partsize
 	swa := &streamWriterAt{
 		writer:     w,
 		partsize:   partsize,
-		buf:        buf[:0],
+		buf:        buf,
 		partfilled: make([]int, nparts),
 		partready:  make(chan []byte, nparts),
 		errWrite:   make(chan error, 1),
@@ -81,9 +84,12 @@ func (swa *streamWriterAt) writeToWriter() {
 // WriteAt implements io.WriterAt.
 func (swa *streamWriterAt) WriteAt(p []byte, offset int64) (int, error) {
 	pos := int(offset)
-	n := copy(swa.buf[pos:], p)
+	n := 0
+	if pos <= len(swa.buf) {
+		n = copy(swa.buf[pos:], p)
+	}
 	if n < len(p) {
-		return n, errors.New("write beyond end of buffer")
+		return n, fmt.Errorf("write beyond end of buffer: offset %d len %d buf %d", offset, len(p), len(swa.buf))
 	}
 	endpos := pos + n
 
diff --git a/services/keepstore/streamwriterat_test.go b/services/keepstore/streamwriterat_test.go
index 924fce9b71..fe6837e522 100644
--- a/services/keepstore/streamwriterat_test.go
+++ b/services/keepstore/streamwriterat_test.go
@@ -13,30 +13,14 @@ import (
 
 var _ = Suite(&streamWriterAtSuite{})
 
-type streamWriterAtSuite struct {
-}
-
-func (s *streamWriterAtSuite) TestInvalidUsage(c *C) {
-	p := []byte("foo")
-
-	swa := streamWriterAt{}
-	_, err := swa.WriteAt(p, 0)
-	c.Check(err, NotNil)
-	err = swa.Close()
-	c.Check(err, NotNil)
-
-	swa = streamWriterAt{buf: make([]byte, 3)}
-	_, err = swa.WriteAt(p, 0)
-	c.Check(err, NotNil)
-	err = swa.Close()
-	c.Check(err, NotNil)
-}
+type streamWriterAtSuite struct{}
 
 func (s *streamWriterAtSuite) TestPartSizes(c *C) {
 	for partsize := 1; partsize < 5; partsize++ {
 		for writesize := 1; writesize < 5; writesize++ {
 			for datasize := 1; datasize < 100; datasize += 13 {
 				for bufextra := 0; bufextra < 5; bufextra++ {
+					c.Logf("=== partsize %d writesize %d datasize %d bufextra %d", partsize, writesize, datasize, bufextra)
 					outbuf := bytes.NewBuffer(nil)
 					indata := make([]byte, datasize)
 					for i := range indata {
@@ -56,10 +40,8 @@ func (s *streamWriterAtSuite) TestPartSizes(c *C) {
 							swa.WriteAt(indata[pos:endpos], int64(pos))
 						}()
 					}
-					go func() {
-						wg.Wait()
-						swa.Close()
-					}()
+					wg.Wait()
+					swa.Close()
 					c.Check(outbuf.Bytes(), DeepEquals, indata)
 				}
 			}
@@ -74,7 +56,7 @@ func (s *streamWriterAtSuite) TestOverflow(c *C) {
 		_, err := swa.WriteAt([]byte("foo"), int64(len(buf)+offset))
 		c.Check(err, NotNil)
 		err = swa.Close()
-		c.Check(err, NotNil)
+		c.Check(err, IsNil)
 	}
 }
 
diff --git a/services/keepstore/unix_volume_test.go b/services/keepstore/unix_volume_test.go
index dee146acd6..555a4bc6dd 100644
--- a/services/keepstore/unix_volume_test.go
+++ b/services/keepstore/unix_volume_test.go
@@ -17,10 +17,8 @@ import (
 	"syscall"
 	"time"
 
-	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/ctxlog"
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/sirupsen/logrus"
 	check "gopkg.in/check.v1"
 )
 
@@ -62,14 +60,19 @@ func (v *TestableUnixVolume) ReadWriteOperationLabelValues() (r, w string) {
 var _ = check.Suite(&UnixVolumeSuite{})
 
 type UnixVolumeSuite struct {
-	cluster *arvados.Cluster
+	params  newVolumeParams
 	volumes []*TestableUnixVolume
-	metrics *volumeMetricsVecs
 }
 
 func (s *UnixVolumeSuite) SetUpTest(c *check.C) {
-	s.cluster = testCluster(c)
-	s.metrics = newVolumeMetricsVecs(prometheus.NewRegistry())
+	logger := ctxlog.TestLogger(c)
+	reg := prometheus.NewRegistry()
+	s.params = newVolumeParams{
+		Cluster:     testCluster(c),
+		Logger:      logger,
+		MetricsVecs: newVolumeMetricsVecs(reg),
+		BufferPool:  newBufferPool(logger, 8, reg),
+	}
 }
 
 func (s *UnixVolumeSuite) TearDownTest(c *check.C) {
@@ -78,7 +81,7 @@ func (s *UnixVolumeSuite) TearDownTest(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, cluster *arvados.Cluster, volume arvados.Volume, metrics *volumeMetricsVecs, serialize bool) *TestableUnixVolume {
+func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, params newVolumeParams, serialize bool) *TestableUnixVolume {
 	d, err := ioutil.TempDir("", "volume_test")
 	c.Check(err, check.IsNil)
 	var locker sync.Locker
@@ -89,10 +92,10 @@ func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, cluster *arvados.Clu
 		UnixVolume: UnixVolume{
 			Root:    d,
 			locker:  locker,
-			cluster: cluster,
-			logger:  ctxlog.TestLogger(c),
-			volume:  volume,
-			metrics: metrics,
+			cluster: params.Cluster,
+			logger:  params.Logger,
+			volume:  params.ConfigVolume,
+			metrics: params.MetricsVecs,
 		},
 		t: c,
 	}
@@ -103,34 +106,34 @@ func (s *UnixVolumeSuite) newTestableUnixVolume(c *check.C, cluster *arvados.Clu
 
 // serialize = false; readonly = false
 func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTests(c *check.C) {
-	DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		return s.newTestableUnixVolume(c, cluster, volume, metrics, false)
+	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+		return s.newTestableUnixVolume(c, params, false)
 	})
 }
 
 // serialize = false; readonly = true
 func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsReadOnly(c *check.C) {
-	DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		return s.newTestableUnixVolume(c, cluster, volume, metrics, true)
+	DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+		return s.newTestableUnixVolume(c, params, true)
 	})
 }
 
 // serialize = true; readonly = false
 func (s *UnixVolumeSuite) TestUnixVolumeWithGenericTestsSerialized(c *check.C) {
-	DoGenericVolumeTests(c, false, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		return s.newTestableUnixVolume(c, cluster, volume, metrics, false)
+	DoGenericVolumeTests(c, false, func(t TB, params newVolumeParams) TestableVolume {
+		return s.newTestableUnixVolume(c, params, false)
 	})
 }
 
 // serialize = true; readonly = true
 func (s *UnixVolumeSuite) TestUnixVolumeHandlersWithGenericVolumeTests(c *check.C) {
-	DoGenericVolumeTests(c, true, func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume {
-		return s.newTestableUnixVolume(c, cluster, volume, metrics, true)
+	DoGenericVolumeTests(c, true, func(t TB, params newVolumeParams) TestableVolume {
+		return s.newTestableUnixVolume(c, params, true)
 	})
 }
 
 func (s *UnixVolumeSuite) TestGetNotFound(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 	v.BlockWrite(context.Background(), TestHash, TestBlock)
 
@@ -147,7 +150,7 @@ func (s *UnixVolumeSuite) TestGetNotFound(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestPut(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
 	err := v.BlockWrite(context.Background(), TestHash, TestBlock)
@@ -164,7 +167,7 @@ func (s *UnixVolumeSuite) TestPut(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
 	err := os.RemoveAll(v.Root)
@@ -174,7 +177,7 @@ func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestIsFull(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
 	fullPath := v.Root + "/full"
@@ -194,7 +197,7 @@ func (s *UnixVolumeSuite) TestIsFull(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
 	v.BlockWrite(context.Background(), TestHash, TestBlock)
@@ -208,7 +211,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestUnixVolumeGetFuncFileError(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
 	funcCalled := false
@@ -225,7 +228,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncFileError(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 
 	v.BlockWrite(context.Background(), TestHash, TestBlock)
@@ -260,7 +263,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, true)
+	v := s.newTestableUnixVolume(c, s.params, true)
 	defer v.Teardown()
 	v.locker.Lock()
 	ctx, cancel := context.WithCancel(context.Background())
@@ -277,7 +280,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockRead(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	v := s.newTestableUnixVolume(c, s.params, false)
 	defer v.Teardown()
 	bpath := v.blockPath(TestHash)
 	v.PutRaw(TestHash, TestBlock)
@@ -299,7 +302,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockRead(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestStats(c *check.C) {
-	vol := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	vol := s.newTestableUnixVolume(c, s.params, false)
 	stats := func() string {
 		buf, err := json.Marshal(vol.InternalStats())
 		c.Check(err, check.IsNil)
@@ -344,7 +347,7 @@ func (s *UnixVolumeSuite) TestStats(c *check.C) {
 }
 
 func (s *UnixVolumeSuite) TestSkipUnusedDirs(c *check.C) {
-	vol := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
+	vol := s.newTestableUnixVolume(c, s.params, false)
 
 	err := os.Mkdir(vol.UnixVolume.Root+"/aaa", 0777)
 	c.Assert(err, check.IsNil)
diff --git a/services/keepstore/volume_generic_test.go b/services/keepstore/volume_generic_test.go
index ada8ce3035..00ef12f4e1 100644
--- a/services/keepstore/volume_generic_test.go
+++ b/services/keepstore/volume_generic_test.go
@@ -40,7 +40,7 @@ type TB interface {
 // A TestableVolumeFactory returns a new TestableVolume. The factory
 // function, and the TestableVolume it returns, can use "t" to write
 // logs, fail the current test, etc.
-type TestableVolumeFactory func(t TB, cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) TestableVolume
+type TestableVolumeFactory func(t TB, params newVolumeParams) TestableVolume
 
 // DoGenericVolumeTests runs a set of tests that every TestableVolume
 // is expected to pass. It calls factory to create a new TestableVolume
@@ -91,11 +91,12 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
 }
 
 type genericVolumeSuite struct {
-	cluster  *arvados.Cluster
-	volume   arvados.Volume
-	logger   logrus.FieldLogger
-	metrics  *volumeMetricsVecs
-	registry *prometheus.Registry
+	cluster    *arvados.Cluster
+	volume     arvados.Volume
+	logger     logrus.FieldLogger
+	metrics    *volumeMetricsVecs
+	registry   *prometheus.Registry
+	bufferPool *bufferPool
 }
 
 func (s *genericVolumeSuite) setup(t TB) {
@@ -103,10 +104,17 @@ func (s *genericVolumeSuite) setup(t TB) {
 	s.logger = ctxlog.TestLogger(t)
 	s.registry = prometheus.NewRegistry()
 	s.metrics = newVolumeMetricsVecs(s.registry)
+	s.bufferPool = newBufferPool(s.logger, 8, s.registry)
 }
 
 func (s *genericVolumeSuite) newVolume(t TB, factory TestableVolumeFactory) TestableVolume {
-	return factory(t, s.cluster, s.volume, s.logger, s.metrics)
+	return factory(t, newVolumeParams{
+		Cluster:      s.cluster,
+		ConfigVolume: s.volume,
+		Logger:       s.logger,
+		MetricsVecs:  s.metrics,
+		BufferPool:   s.bufferPool,
+	})
 }
 
 // Put a test block, get it and verify content
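
For orientation, the newVolumeParams fields exercised by the updated tests in this commit (a sketch inferred from the hunks above; the authoritative definition lives in the keepstore package and may contain additional fields):

    // Sketch only -- field set and types inferred from the test changes
    // in this commit, not copied from the real definition.
    type newVolumeParams struct {
        UUID         string // volume UUID, used by stubVolume logging
        Cluster      *arvados.Cluster
        ConfigVolume arvados.Volume
        Logger       logrus.FieldLogger
        MetricsVecs  *volumeMetricsVecs
        BufferPool   *bufferPool
    }
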

commit ff875a23189a96e7d2109efdc5b030ad86002efe
Author: Tom Clegg <tom at curii.com>
Date:   Fri Feb 2 16:12:21 2024 -0500

    2960: Update tests, continued.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index d8bb0f06ba..8f83054b2b 100644
--- a/services/keepstore/azure_blob_volume.go
+++ b/services/keepstore/azure_blob_volume.go
@@ -511,20 +511,6 @@ func (v *AzureBlobVolume) BlockUntrash(hash string) error {
 	return v.translateError(err)
 }
 
-// Status returns a VolumeStatus struct with placeholder data.
-func (v *AzureBlobVolume) Status() *VolumeStatus {
-	return &VolumeStatus{
-		DeviceNum: 1,
-		BytesFree: BlockSize * 1000,
-		BytesUsed: 1,
-	}
-}
-
-// String returns a volume label, including the container name.
-func (v *AzureBlobVolume) String() string {
-	return fmt.Sprintf("azure-storage-container:%+q", v.ContainerName)
-}
-
 // If possible, translate an Azure SDK error to a recognizable error
 // like os.ErrNotExist.
 func (v *AzureBlobVolume) translateError(err error) error {
@@ -617,7 +603,7 @@ func (v *AzureBlobVolume) EmptyTrash() {
 	close(todo)
 	wg.Wait()
 
-	v.logger.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+	v.logger.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
 // InternalStats returns bucket I/O and API call counters.
diff --git a/services/keepstore/command.go b/services/keepstore/command.go
index c0b2f56692..d01b30c907 100644
--- a/services/keepstore/command.go
+++ b/services/keepstore/command.go
@@ -110,8 +110,8 @@ func newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, tok
 	if err != nil {
 		return service.ErrorHandler(ctx, cluster, err)
 	}
-	puller := newPuller(ks, reg)
-	trasher := newTrasher(ks, reg)
-	_ = newTrashEmptier(ks, reg)
+	puller := newPuller(ctx, ks, reg)
+	trasher := newTrasher(ctx, ks, reg)
+	_ = newTrashEmptier(ctx, ks, reg)
 	return newRouter(ks, puller, trasher)
 }
diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
index b7a094d0b8..81f3f81ef7 100644
--- a/services/keepstore/keepstore_test.go
+++ b/services/keepstore/keepstore_test.go
@@ -8,11 +8,11 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
 	"os"
-	"sort"
 	"strings"
 	"sync"
 	"time"
@@ -26,6 +26,11 @@ import (
 	. "gopkg.in/check.v1"
 )
 
+const (
+	fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+	barHash = "37b51d194a7513e45b56f6524f2d51f2"
+)
+
 var testServiceURL = func() arvados.URL {
 	return arvados.URL{Host: "localhost:12345", Scheme: "http"}
 }()
@@ -48,9 +53,11 @@ func testCluster(t TB) *arvados.Cluster {
 	return cluster
 }
 
-func testKeepstore(t TB, cluster *arvados.Cluster) (*keepstore, context.CancelFunc) {
+func testKeepstore(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*keepstore, context.CancelFunc) {
+	if reg == nil {
+		reg = prometheus.NewRegistry()
+	}
 	ctx, cancel := context.WithCancel(context.Background())
-	reg := prometheus.NewRegistry()
 	ks, err := newKeepstore(ctx, cluster, cluster.SystemRootToken, reg, testServiceURL)
 	if err != nil {
 		t.Fatal(err)
@@ -73,7 +80,7 @@ func (s *keepstoreSuite) SetUpTest(c *C) {
 }
 
 func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
-	ks, cancel := testKeepstore(c, s.cluster)
+	ks, cancel := testKeepstore(c, s.cluster, nil)
 	defer cancel()
 
 	ctx := authContext(arvadostest.ActiveTokenV2)
@@ -110,11 +117,9 @@ func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
 
 func (s *keepstoreSuite) TestBlockReadWrite_SigningDisabled(c *C) {
 	s.cluster.Collections.BlobSigning = false
-	ks, cancel := testKeepstore(c, s.cluster)
+	ks, cancel := testKeepstore(c, s.cluster, nil)
 	defer cancel()
 
-	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
-
 	resp, err := ks.BlockWrite(authContext("abcde"), arvados.BlockWriteOptions{
 		Hash: fooHash,
 		Data: []byte("foo"),
@@ -152,8 +157,6 @@ func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 			StorageClasses: map[string]bool{"class2": true, "class3": true}},
 	}
 
-	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
-
 	for _, trial := range []struct {
 		priority1 int // priority of class1, thus vol1
 		priority2 int // priority of class2
@@ -172,7 +175,7 @@ func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 			"class2": {Priority: trial.priority2},
 			"class3": {Priority: trial.priority3},
 		}
-		ks, cancel := testKeepstore(c, s.cluster)
+		ks, cancel := testKeepstore(c, s.cluster, nil)
 		defer cancel()
 		stubLog := &stubLog{}
 		for _, mnt := range ks.mounts {
@@ -199,15 +202,19 @@ func (s *keepstoreSuite) TestBlockWrite_NoWritableVolumes(c *C) {
 	for _, v := range s.cluster.Volumes {
 		v.ReadOnly = true
 	}
-	ks, cancel := testKeepstore(c, s.cluster)
+	ks, cancel := testKeepstore(c, s.cluster, nil)
 	defer cancel()
 	for _, mnt := range ks.mounts {
-		mnt.volume.(*stubVolume).blockWrite = func(context.Context, string, []byte) error { c.Fatal("volume BlockWrite called") }
+		mnt.volume.(*stubVolume).blockWrite = func(context.Context, string, []byte) error {
+			c.Fatal("volume BlockWrite called")
+			return errors.New("fail")
+		}
 	}
 	ctx := authContext(arvadostest.ActiveTokenV2)
-	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
 
-	_, err := ks.BlockWrite(ctx, fooHash, []byte("foo"))
+	_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+		Hash: fooHash,
+		Data: []byte("foo")})
 	c.Check(err, NotNil)
 	c.Check(err.(interface{ HTTPStatus() int }).HTTPStatus(), Equals, http.StatusInsufficientStorage)
 }
@@ -232,19 +239,19 @@ func (s *keepstoreSuite) TestBlockWrite_MultipleStorageClasses(c *C) {
 		"class2": {},
 		"class3": {},
 	}
-	ks, cancel := testKeepstore(c, s.cluster)
+	ks, cancel := testKeepstore(c, s.cluster, nil)
 	defer cancel()
 	stubLog := &stubLog{}
 	for _, mnt := range ks.mounts {
 		mnt.volume.(*stubVolume).stubLog = stubLog
 	}
-	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
 
 	rvz := ks.rendezvous(fooHash, ks.mountsW)
 	c.Assert(rvz[0].UUID[24:], Equals, "111")
 	c.Assert(rvz[1].UUID[24:], Equals, "121")
 	c.Assert(rvz[2].UUID[24:], Equals, "222")
 
+	ctx := authContext(arvadostest.ActiveTokenV2)
 	for _, trial := range []struct {
 		classes   string // desired classes
 		expectLog string
@@ -310,172 +317,105 @@ func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
 		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
 	}
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-	rt := RequestTester{
-		method:      "PUT",
-		uri:         "/" + TestHash,
-		requestBody: TestBlock,
-	}
+	ks, cancel := testKeepstore(c, s.cluster, nil)
+	defer cancel()
+	ctx := authContext(arvadostest.ActiveTokenV2)
 
 	for _, trial := range []struct {
-		ask    string
-		expect string
+		ask    []string
+		expect map[string]int
 	}{
-		{"", ""},
-		{"default", "default=1"},
-		{" , default , default , ", "default=1"},
-		{"special", "extra=1, special=1"},
-		{"special, readonly", "extra=1, special=1"},
-		{"special, nonexistent", "extra=1, special=1"},
-		{"extra, special", "extra=1, special=1"},
-		{"default, special", "default=1, extra=1, special=1"},
+		{nil,
+			map[string]int{"default": 1}},
+		{[]string{},
+			map[string]int{"default": 1}},
+		{[]string{"default"},
+			map[string]int{"default": 1}},
+		{[]string{"default", "default"},
+			map[string]int{"default": 1}},
+		{[]string{"special"},
+			map[string]int{"extra": 1, "special": 1}},
+		{[]string{"special", "readonly"},
+			map[string]int{"extra": 1, "special": 1}},
+		{[]string{"special", "nonexistent"},
+			map[string]int{"extra": 1, "special": 1}},
+		{[]string{"extra", "special"},
+			map[string]int{"extra": 1, "special": 1}},
+		{[]string{"default", "special"},
+			map[string]int{"default": 1, "extra": 1, "special": 1}},
 	} {
 		c.Logf("success case %#v", trial)
-		rt.storageClasses = trial.ask
-		resp := IssueRequest(s.router, &rt)
-		if trial.expect == "" {
+		resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+			Hash:           fooHash,
+			Data:           []byte("foo"),
+			StorageClasses: trial.ask,
+		})
+		if !c.Check(err, IsNil) {
+			continue
+		}
+		c.Check(resp.Replicas, Equals, 1)
+		if len(trial.expect) == 0 {
 			// any non-empty value is correct
-			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Not(Equals), "")
+			c.Check(resp.StorageClasses, Not(HasLen), 0)
 		} else {
-			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), Equals, trial.expect)
+			c.Check(resp.StorageClasses, DeepEquals, trial.expect)
 		}
 	}
 
-	for _, trial := range []struct {
-		ask string
-	}{
+	for _, ask := range [][]string{
 		{"doesnotexist"},
-		{"doesnotexist, readonly"},
+		{"doesnotexist", "readonly"},
 		{"readonly"},
 	} {
-		c.Logf("failure case %#v", trial)
-		rt.storageClasses = trial.ask
-		resp := IssueRequest(s.router, &rt)
-		c.Check(resp.Code, Equals, http.StatusServiceUnavailable)
+		c.Logf("failure case %s", ask)
+		_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+			Hash:           fooHash,
+			Data:           []byte("foo"),
+			StorageClasses: ask,
+		})
+		c.Check(err, NotNil)
 	}
 }
 
-func sortCommaSeparated(s string) string {
-	slice := strings.Split(s, ", ")
-	sort.Strings(slice)
-	return strings.Join(slice, ", ")
-}
-
-func (s *keepstoreSuite) TestPutResponseHeader(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-
-	resp := IssueRequest(s.router, &RequestTester{
-		method:      "PUT",
-		uri:         "/" + TestHash,
-		requestBody: TestBlock,
-	})
-	c.Logf("%#v", resp)
-	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
-	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "default=1")
-}
-
 func (s *keepstoreSuite) TestUntrashHandler(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-
-	// Set up Keep volumes
-	vols := s.router.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	// unauthenticatedReq => UnauthorizedError
-	unauthenticatedReq := &RequestTester{
-		method: "PUT",
-		uri:    "/untrash/" + TestHash,
-	}
-	response := IssueRequest(s.router, unauthenticatedReq)
-	ExpectStatusCode(c,
-		"Unauthenticated request",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// notDataManagerReq => UnauthorizedError
-	notDataManagerReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/" + TestHash,
-		apiToken: knownToken,
-	}
-
-	response = IssueRequest(s.router, notDataManagerReq)
-	ExpectStatusCode(c,
-		"Non-datamanager token",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// datamanagerWithBadHashReq => StatusBadRequest
-	datamanagerWithBadHashReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/thisisnotalocator",
-		apiToken: s.cluster.SystemRootToken,
-	}
-	response = IssueRequest(s.router, datamanagerWithBadHashReq)
-	ExpectStatusCode(c,
-		"Bad locator in untrash request",
-		http.StatusBadRequest,
-		response)
-
-	// datamanagerWrongMethodReq => StatusBadRequest
-	datamanagerWrongMethodReq := &RequestTester{
-		method:   "GET",
-		uri:      "/untrash/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
-	}
-	response = IssueRequest(s.router, datamanagerWrongMethodReq)
-	ExpectStatusCode(c,
-		"Only PUT method is supported for untrash",
-		http.StatusMethodNotAllowed,
-		response)
-
-	// datamanagerReq => StatusOK
-	datamanagerReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
-	}
-	response = IssueRequest(s.router, datamanagerReq)
-	ExpectStatusCode(c,
-		"",
-		http.StatusOK,
-		response)
-	c.Check(response.Body.String(), Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
+	c.Fatal("todo")
+	c.Check("resp", Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
 }
 
 func (s *keepstoreSuite) TestUntrashHandlerWithNoWritableVolumes(c *C) {
-	// Change all volumes to read-only
 	for uuid, v := range s.cluster.Volumes {
 		v.ReadOnly = true
 		s.cluster.Volumes[uuid] = v
 	}
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
+	ks, cancel := testKeepstore(c, s.cluster, nil)
+	defer cancel()
+
+	for _, mnt := range ks.mounts {
+		err := mnt.BlockWrite(context.Background(), fooHash, []byte("foo"))
+		c.Assert(err, IsNil)
+		_, err = mnt.BlockRead(context.Background(), fooHash, io.Discard)
+		c.Assert(err, IsNil)
+	}
+
+	err := ks.BlockUntrash(context.Background(), fooHash)
+	c.Check(os.IsNotExist(err), Equals, true)
 
-	// datamanagerReq => StatusOK
-	datamanagerReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
+	for _, mnt := range ks.mounts {
+		_, err := mnt.BlockRead(context.Background(), fooHash, io.Discard)
+		c.Assert(err, IsNil)
 	}
-	response := IssueRequest(s.router, datamanagerReq)
-	ExpectStatusCode(c,
-		"No writable volumes",
-		http.StatusNotFound,
-		response)
 }
 
 func (s *keepstoreSuite) TestBlockWrite_SkipReadonly(c *C) {
-	c.Fail("todo")
+	c.Fatal("todo")
 }
 
 func (s *keepstoreSuite) TestBlockTrash_SkipReadonly(c *C) {
-	c.Fail("todo")
+	c.Fatal("todo")
 }
 
 func (s *keepstoreSuite) TestBlockRead_VolumeError503(c *C) {
-	c.Fail("todo: return 503 ")
+	c.Fatal("todo: return 503 ")
 }
 
 func init() {
@@ -483,7 +423,7 @@ func init() {
 		v := &stubVolume{
 			params:  params,
 			data:    make(map[string]stubData),
-			stubLog: &stubLog,
+			stubLog: &stubLog{},
 		}
 		return v, nil
 	}
@@ -494,13 +434,13 @@ type stubLog struct {
 	bytes.Buffer
 }
 
-func (sl *stubLog) Printf(fmt string, args ...interface{}) {
+func (sl *stubLog) Printf(format string, args ...interface{}) {
 	if sl == nil {
 		return
 	}
 	sl.Lock()
 	defer sl.Unlock()
-	fmt.Fprintf(sl+"\n", fmt, args...)
+	fmt.Fprintf(sl, format+"\n", args...)
 }
 
 type stubData struct {
@@ -534,7 +474,7 @@ type stubVolume struct {
 func (v *stubVolume) log(op, hash string) {
 	// Note this intentionally crashes if len(hash)<32 -- if
 	// keepstore ever does that, tests should fail.
-	v.stubLog.Printf("%s %s %s", v.params.ConfigVolume.UUID, op, hash[29:32])
+	v.stubLog.Printf("%s %s %s", v.params.UUID, op, hash[29:32])
 }
 
 func (v *stubVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
@@ -546,12 +486,12 @@ func (v *stubVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writ
 		}
 	}
 	v.mtx.Lock()
-	data, ok := v.data[hash]
+	ent, ok := v.data[hash]
 	v.mtx.Unlock()
 	if !ok {
 		return 0, os.ErrNotExist
 	}
-	return writeTo.Write(data)
+	return writeTo.Write(ent.data)
 }
 
 func (v *stubVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
@@ -605,7 +545,7 @@ func (v *stubVolume) BlockTrash(hash string) error {
 	if !ok || !ent.trash.IsZero() {
 		return os.ErrNotExist
 	}
-	ent.trash = time.Now().Add(v.Cluster.Collections.TrashLifetime.Duration())
+	ent.trash = time.Now().Add(v.params.Cluster.Collections.BlobTrashLifetime.Duration())
 	v.data[hash] = ent
 	return nil
 }
@@ -629,7 +569,7 @@ func (v *stubVolume) BlockUntrash(hash string) error {
 }
 
 func (v *stubVolume) Index(ctx context.Context, prefix string, writeTo io.Writer) error {
-	v.stubLog.Printf("%s index %s", v.params.ConfigVolume.UUID, prefix)
+	v.stubLog.Printf("%s index %s", v.params.UUID, prefix)
 	if v.index != nil {
 		if err := v.index(ctx, prefix, writeTo); err != nil {
 			return err
@@ -639,7 +579,7 @@ func (v *stubVolume) Index(ctx context.Context, prefix string, writeTo io.Writer
 	v.mtx.Lock()
 	for hash, ent := range v.data {
 		if strings.HasPrefix(hash, prefix) {
-			fmt.Fprintf(buf, "%s+%s %d\n", hash, len(ent.data), ent.mtime.UnixNano())
+			fmt.Fprintf(buf, "%s+%d %d\n", hash, len(ent.data), ent.mtime.UnixNano())
 		}
 	}
 	v.mtx.Unlock()
@@ -664,7 +604,7 @@ func (v *stubVolume) Mtime(hash string) (time.Time, error) {
 }
 
 func (v *stubVolume) EmptyTrash() {
-	v.stubLog.Printf("%s emptytrash", v.params.ConfigVolume.UUID)
+	v.stubLog.Printf("%s emptytrash", v.params.UUID)
 	v.mtx.Lock()
 	defer v.mtx.Unlock()
 	for hash, ent := range v.data {
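
With the corrected format string above, the stub volume's index emits one line per block: locator (hash+size) followed by the mtime in nanoseconds, which is the shape the mounts_test regexes below expect. For the 3-byte block "foo" an emitted line looks like (mtime value illustrative):

    acbd18db4cc2f85cedef654fccc4a4d8+3 1707494400000000000
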
diff --git a/services/keepstore/mounts_test.go b/services/keepstore/mounts_test.go
index 69e31bfa41..8b0ad6a9be 100644
--- a/services/keepstore/mounts_test.go
+++ b/services/keepstore/mounts_test.go
@@ -5,12 +5,11 @@
 package keepstore
 
 import (
-	"bytes"
 	"context"
 	"encoding/json"
 	"net/http"
-	"net/http/httptest"
 
+	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/arvadostest"
 	"git.arvados.org/arvados.git/sdk/go/ctxlog"
 	"git.arvados.org/arvados.git/sdk/go/httpserver"
@@ -19,14 +18,16 @@ import (
 )
 
 func (s *routerSuite) TestMounts(c *C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
 
-	vols := s.handler.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-	vols[1].Put(context.Background(), TestHash2, TestBlock2)
+	router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+	router.keepstore.mountsW[1].BlockWrite(context.Background(), barHash, []byte("bar"))
 
-	resp := s.call("GET", "/mounts", "", nil)
+	resp := call(router, "GET", "/mounts", s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Log(resp.Body.String())
+
 	var mntList []struct {
 		UUID           string          `json:"uuid"`
 		DeviceID       string          `json:"device_id"`
@@ -34,10 +35,10 @@ func (s *routerSuite) TestMounts(c *C) {
 		Replication    int             `json:"replication"`
 		StorageClasses map[string]bool `json:"storage_classes"`
 	}
-	c.Log(resp.Body.String())
 	err := json.Unmarshal(resp.Body.Bytes(), &mntList)
 	c.Assert(err, IsNil)
-	c.Assert(len(mntList), Equals, 2)
+	c.Assert(mntList, HasLen, 2)
+
 	for _, m := range mntList {
 		c.Check(len(m.UUID), Equals, 27)
 		c.Check(m.UUID[:12], Equals, "zzzzz-nyw5e-")
@@ -50,52 +51,56 @@ func (s *routerSuite) TestMounts(c *C) {
 
 	// Bad auth
 	for _, tok := range []string{"", "xyzzy"} {
-		resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil)
+		resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil, nil)
 		c.Check(resp.Code, Equals, http.StatusUnauthorized)
 		c.Check(resp.Body.String(), Equals, "Unauthorized\n")
 	}
 
-	tok := arvadostest.SystemRootToken
-
 	// Nonexistent mount UUID
-	resp = s.call("GET", "/mounts/X/blocks", tok, nil)
+	resp = call(router, "GET", "/mounts/X/blocks", s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusNotFound)
-	c.Check(resp.Body.String(), Equals, "mount not found\n")
 
 	// Complete index of first mount
-	resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks", tok, nil)
+	resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks", s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
-	c.Check(resp.Body.String(), Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
+	c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
 
 	// Partial index of first mount (one block matches prefix)
-	resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+TestHash[:2], tok, nil)
+	resp = call(router, "GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
-	c.Check(resp.Body.String(), Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
+	c.Check(resp.Body.String(), Matches, fooHash+`\+[0-9]+ [0-9]+\n\n`)
 
 	// Complete index of second mount (note trailing slash)
-	resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/", tok, nil)
+	resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/", s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
-	c.Check(resp.Body.String(), Matches, TestHash2+`\+[0-9]+ [0-9]+\n\n`)
+	c.Check(resp.Body.String(), Matches, barHash+`\+[0-9]+ [0-9]+\n\n`)
 
 	// Partial index of second mount (no blocks match prefix)
-	resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+TestHash[:2], tok, nil)
+	resp = call(router, "GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+fooHash[:2], s.cluster.SystemRootToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
 	c.Check(resp.Body.String(), Equals, "\n")
 }
 
 func (s *routerSuite) TestMetrics(c *C) {
 	reg := prometheus.NewRegistry()
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", reg, testServiceURL), IsNil)
-	instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), s.handler.Handler)
-	s.handler.Handler = instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
-
-	s.call("PUT", "/"+TestHash, "", TestBlock)
-	s.call("PUT", "/"+TestHash2, "", TestBlock2)
-	resp := s.call("GET", "/metrics.json", "", nil)
+	router, cancel := testRouter(c, s.cluster, reg)
+	defer cancel()
+	instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), router)
+	handler := instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
+
+	router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+		Hash: fooHash,
+		Data: []byte("foo"),
+	})
+	router.keepstore.BlockWrite(context.Background(), arvados.BlockWriteOptions{
+		Hash: barHash,
+		Data: []byte("bar"),
+	})
+	resp := call(handler, "GET", "/metrics.json", "", nil, nil)
 	c.Check(resp.Code, Equals, http.StatusUnauthorized)
-	resp = s.call("GET", "/metrics.json", "foobar", nil)
+	resp = call(handler, "GET", "/metrics.json", "foobar", nil, nil)
 	c.Check(resp.Code, Equals, http.StatusForbidden)
-	resp = s.call("GET", "/metrics.json", arvadostest.ManagementToken, nil)
+	resp = call(handler, "GET", "/metrics.json", arvadostest.ManagementToken, nil, nil)
 	c.Check(resp.Code, Equals, http.StatusOK)
 	var j []struct {
 		Name   string
@@ -140,13 +145,3 @@ func (s *routerSuite) TestMetrics(c *C) {
 		c.Check(ok, Equals, true, Commentf("checking metric %q", m))
 	}
 }
-
-func (s *routerSuite) call(method, path, tok string, body []byte) *httptest.ResponseRecorder {
-	resp := httptest.NewRecorder()
-	req, _ := http.NewRequest(method, path, bytes.NewReader(body))
-	if tok != "" {
-		req.Header.Set("Authorization", "Bearer "+tok)
-	}
-	s.handler.ServeHTTP(resp, req)
-	return resp
-}
diff --git a/services/keepstore/proxy_remote_test.go b/services/keepstore/proxy_remote_test.go
index 923d1b805e..d882b3a8f9 100644
--- a/services/keepstore/proxy_remote_test.go
+++ b/services/keepstore/proxy_remote_test.go
@@ -5,7 +5,6 @@
 package keepstore
 
 import (
-	"context"
 	"crypto/md5"
 	"encoding/json"
 	"fmt"
@@ -21,7 +20,6 @@ import (
 	"git.arvados.org/arvados.git/sdk/go/arvadostest"
 	"git.arvados.org/arvados.git/sdk/go/auth"
 	"git.arvados.org/arvados.git/sdk/go/keepclient"
-	"github.com/prometheus/client_golang/prometheus"
 	check "gopkg.in/check.v1"
 )
 
@@ -88,8 +86,6 @@ func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
 	s.remoteAPI = httptest.NewUnstartedServer(http.HandlerFunc(s.remoteAPIHandler))
 	s.remoteAPI.StartTLS()
 	s.cluster = testCluster(c)
-	s.cluster.Collections.BlobSigningKey = knownKey
-	s.cluster.SystemRootToken = arvadostest.SystemRootToken
 	s.cluster.RemoteClusters = map[string]arvados.RemoteCluster{
 		s.remoteClusterID: {
 			Host:     strings.Split(s.remoteAPI.URL, "//")[1],
@@ -99,8 +95,6 @@ func (s *ProxyRemoteSuite) SetUpTest(c *check.C) {
 		},
 	}
 	s.cluster.Volumes = map[string]arvados.Volume{"zzzzz-nyw5e-000000000000000": {Driver: "mock"}}
-	s.handler = &handler{}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 }
 
 func (s *ProxyRemoteSuite) TearDownTest(c *check.C) {
@@ -109,6 +103,9 @@ func (s *ProxyRemoteSuite) TearDownTest(c *check.C) {
 }
 
 func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
+	router, cancel := testRouter(c, s.cluster, nil)
+	defer cancel()
+
 	data := []byte("foo bar")
 	s.remoteKeepData = data
 	locator := fmt.Sprintf("%x+%d", md5.Sum(data), len(data))
@@ -184,7 +181,7 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
 			req.Header.Set("X-Keep-Signature", trial.xKeepSignature)
 		}
 		resp = httptest.NewRecorder()
-		s.handler.ServeHTTP(resp, req)
+		router.ServeHTTP(resp, req)
 		c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
 		c.Check(resp.Code, check.Equals, trial.expectCode)
 		if resp.Code == http.StatusOK {
@@ -203,13 +200,13 @@ func (s *ProxyRemoteSuite) TestProxyRemote(c *check.C) {
 
 		c.Check(locHdr, check.Not(check.Equals), "")
 		c.Check(locHdr, check.Not(check.Matches), `.*\+R.*`)
-		c.Check(VerifySignature(s.cluster, locHdr, trial.token), check.IsNil)
+		c.Check(arvados.VerifySignature(locHdr, trial.token, s.cluster.Collections.BlobSigningTTL.Duration(), []byte(s.cluster.Collections.BlobSigningKey)), check.IsNil)
 
 		// Ensure block can be requested using new signature
 		req = httptest.NewRequest("GET", "/"+locHdr, nil)
 		req.Header.Set("Authorization", "Bearer "+trial.token)
 		resp = httptest.NewRecorder()
-		s.handler.ServeHTTP(resp, req)
+		router.ServeHTTP(resp, req)
 		c.Check(resp.Code, check.Equals, http.StatusOK)
 		c.Check(s.remoteKeepRequests, check.Equals, trial.expectRemoteReqs)
 	}
diff --git a/services/keepstore/pull_worker.go b/services/keepstore/pull_worker.go
index 0207943bfa..60cd97edf7 100644
--- a/services/keepstore/pull_worker.go
+++ b/services/keepstore/pull_worker.go
@@ -27,7 +27,7 @@ type puller struct {
 	cond      *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
 }
 
-func newPuller(keepstore *keepstore, reg *prometheus.Registry) *puller {
+func newPuller(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *puller {
 	p := &puller{
 		keepstore: keepstore,
 		cond:      sync.NewCond(&sync.Mutex{}),
@@ -50,7 +50,7 @@ func newPuller(keepstore *keepstore, reg *prometheus.Registry) *puller {
 		return p
 	}
 	for i := 0; i < 1 || i < keepstore.cluster.Collections.BlobReplicateConcurrency; i++ {
-		go p.runWorker()
+		go p.runWorker(ctx)
 	}
 	return p
 }
@@ -62,7 +62,7 @@ func (p *puller) SetPullList(newlist []pullListItem) {
 	p.cond.Broadcast()
 }
 
-func (p *puller) runWorker() {
+func (p *puller) runWorker(ctx context.Context) {
 	if len(p.keepstore.mountsW) == 0 {
 		p.keepstore.logger.Infof("not running pull worker because there are no writable volumes")
 		return
@@ -82,11 +82,20 @@ func (p *puller) runWorker() {
 		Want_replicas: 1,
 		DiskCacheSize: keepclient.DiskCacheDisabled,
 	}
+	// Ensure the loop below wakes up and returns when ctx
+	// cancels, even if pull list is empty.
+	go func() {
+		<-ctx.Done()
+		p.cond.Broadcast()
+	}()
 	for {
 		p.cond.L.Lock()
-		for len(p.todo) == 0 {
+		for len(p.todo) == 0 && ctx.Err() == nil {
 			p.cond.Wait()
 		}
+		if ctx.Err() != nil {
+			return
+		}
 		item := p.todo[0]
 		p.todo = p.todo[1:]
 		p.cond.L.Unlock()
@@ -114,7 +123,7 @@ func (p *puller) runWorker() {
 		signedLocator := p.keepstore.signLocator(c.AuthToken, item.Locator)
 
 		buf := bytes.NewBuffer(nil)
-		_, err := keepClient.BlockRead(context.Background(), arvados.BlockReadOptions{
+		_, err := keepClient.BlockRead(ctx, arvados.BlockReadOptions{
 			Locator: signedLocator,
 			WriteTo: buf,
 		})
@@ -122,7 +131,7 @@ func (p *puller) runWorker() {
 			p.keepstore.logger.Warnf("error pulling data for pull list entry (%v): %s", item, err)
 			continue
 		}
-		err = dst.BlockWrite(context.Background(), item.Locator, buf.Bytes())
+		err = dst.BlockWrite(ctx, item.Locator, buf.Bytes())
 		if err != nil {
 			p.keepstore.logger.Warnf("error writing data for pull list entry (%v): %s", item, err)
 			continue
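
The cancellation idiom added above, in isolated form: cond.Wait cannot select on a channel, so a side goroutine turns ctx.Done() into a Broadcast and the wait loop also checks ctx.Err(). A standalone sketch with hypothetical names (here the broadcaster takes the lock first, which also avoids a missed wakeup between the condition check and Wait):

    package main

    import (
        "context"
        "fmt"
        "sync"
        "time"
    )

    // waitForWork blocks until either an item is queued or ctx is cancelled.
    func waitForWork(ctx context.Context, cond *sync.Cond, queue *[]string) (string, bool) {
        go func() {
            <-ctx.Done()
            cond.L.Lock()
            defer cond.L.Unlock()
            cond.Broadcast()
        }()
        cond.L.Lock()
        defer cond.L.Unlock()
        for len(*queue) == 0 && ctx.Err() == nil {
            cond.Wait()
        }
        if ctx.Err() != nil {
            return "", false
        }
        item := (*queue)[0]
        *queue = (*queue)[1:]
        return item, true
    }

    func main() {
        cond := sync.NewCond(&sync.Mutex{})
        queue := []string{}
        ctx, cancel := context.WithCancel(context.Background())
        go func() {
            time.Sleep(10 * time.Millisecond)
            cancel() // no work ever arrives; cancellation unblocks the wait
        }()
        if _, ok := waitForWork(ctx, cond, &queue); !ok {
            fmt.Println("cancelled before any work arrived")
        }
    }
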
diff --git a/services/keepstore/pull_worker_test.go b/services/keepstore/pull_worker_test.go
index 1fae8261a2..befe81096c 100644
--- a/services/keepstore/pull_worker_test.go
+++ b/services/keepstore/pull_worker_test.go
@@ -9,14 +9,15 @@ import (
 )
 
 func (s *routerSuite) TestPullList_Clear(c *C) {
-	router, cancel := testRouter(c, s.cluster)
+	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
 	c.Fatal("todo")
+	router.ServeHTTP(nil, nil)
 }
 
 func (s *routerSuite) TestPullList_Execute(c *C) {
-	router, cancel := testRouter(c, s.cluster)
+	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
 	c.Fatal("todo: pull available block to unspecified volume")
@@ -25,4 +26,5 @@ func (s *routerSuite) TestPullList_Execute(c *C) {
 	c.Fatal("todo: log error connecting to remote")
 	c.Fatal("todo: log error writing block to local mount")
 	c.Fatal("todo: log error when destination mount does not exist")
+	router.ServeHTTP(nil, nil)
 }
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
index 567b4b22ad..aeb233e945 100644
--- a/services/keepstore/router_test.go
+++ b/services/keepstore/router_test.go
@@ -5,21 +5,32 @@
 package keepstore
 
 import (
+	"bytes"
 	"context"
 	"net/http"
 	"net/http/httptest"
+	"sort"
 	"strings"
 
 	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"github.com/prometheus/client_golang/prometheus"
 	. "gopkg.in/check.v1"
 )
 
-func testRouter(t TB, cluster *arvados.Cluster) (*router, context.CancelFunc) {
-	ks, cancel := testKeepstore(t, cluster)
-	puller := newPuller(ks, reg)
-	trasher := newTrasher(ks, reg)
-	return newRouter(ks, puller, trasher), cancel
+func testRouter(t TB, cluster *arvados.Cluster, reg *prometheus.Registry) (*router, context.CancelFunc) {
+	if reg == nil {
+		reg = prometheus.NewRegistry()
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	ks, kcancel := testKeepstore(t, cluster, reg)
+	go func() {
+		<-ctx.Done()
+		kcancel()
+	}()
+	puller := newPuller(ctx, ks, reg)
+	trasher := newTrasher(ctx, ks, reg)
+	return newRouter(ks, puller, trasher).(*router), cancel
 }
 
 var _ = Suite(&routerSuite{})
@@ -45,82 +56,60 @@ func (s *routerSuite) SetUpTest(c *C) {
 }
 
 func (s *routerSuite) TestBlockRead_Token(c *C) {
-	router, cancel := testRouter(c, s.cluster)
+	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
-	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
-
-	err := s.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+	err := router.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
 	c.Assert(err, IsNil)
-	locSigned := s.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	locSigned := router.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
 	c.Assert(locSigned, Not(Equals), fooHash+"+3")
 
-	var req *http.Request
-	var resp httptest.ResponseRecorder
-
 	// No token provided
-	req = httptest.NewRequest("GET", "http://example/"+locSigned, nil)
-	resp = httptest.NewRecorder()
-	router.ServeHTTP(resp, req)
-	c.Check(resp.StatusCode, Equals, http.StatusUnauthorized)
-	c.Check(string(resp.Bytes()), Matches, "no token provided")
+	resp := call(router, "GET", "http://example/"+locSigned, "", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
+	c.Check(string(resp.Body.Bytes()), Matches, "no token provided")
 
 	// Different token => invalid signature
-	req = httptest.NewRequest("GET", "http://example/"+locSigned, nil)
-	req.Header.Set("Authorization", "Bearer badtoken")
-	resp = httptest.NewRecorder()
-	router.ServeHTTP(resp, req)
-	c.Check(resp.StatusCode, Equals, http.StatusBadRequest)
-	c.Check(string(resp.Bytes()), Matches, "invalid signature")
+	resp = call(router, "GET", "http://example/"+locSigned, "badtoken", nil, nil)
+	c.Check(resp.Code, Equals, http.StatusBadRequest)
+	c.Check(string(resp.Body.Bytes()), Matches, "invalid signature")
 
 	// Correct token
-	req = httptest.NewRequest("GET", "http://example/"+locSigned, nil)
-	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
-	resp = httptest.NewRecorder()
-	router.ServeHTTP(resp, req)
-	c.Check(resp.StatusCode, Equals, http.StatusOK)
-	c.Check(string(resp.Bytes()), Equals, "foo")
+	resp = call(router, "GET", "http://example/"+locSigned, arvadostest.ActiveTokenV2, nil, nil)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(string(resp.Body.Bytes()), Equals, "foo")
 }
 
 func (s *routerSuite) TestBlockWrite_Headers(c *C) {
-	router, cancel := testRouter(c, s.cluster)
+	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
-	var req *http.Request
-	var resp httptest.ResponseRecorder
-
 	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
 
-	req = httptest.NewRequest("GET", "http://example/"+fooHash, nil)
-	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
-	req.Header.Set("X-Arvados-Replicas-Desired", "2")
-	resp = httptest.NewRecorder()
-	router.ServeHTTP(resp, req)
-	c.Check(resp.StatusCode, Equals, http.StatusOK)
-	c.Check(resp.Header.Get("X-Keep-Replicas-Stored"), Equals, "2")
-	c.Check(resp.Header.Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1; testclass2=1")
-
-	req = httptest.NewRequest("GET", "http://example/"+fooHash, nil)
-	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
-	req.Header.Set("X-Keep-Storage-Classes", "testclass1")
-	resp = httptest.NewRecorder()
-	router.ServeHTTP(resp, req)
-	c.Check(resp.StatusCode, Equals, http.StatusOK)
-	c.Check(resp.Header.Get("X-Keep-Replicas-Stored"), Equals, "1")
-	c.Check(resp.Header.Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1")
-
-	req = httptest.NewRequest("GET", "http://example/"+fooHash, nil)
-	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
-	req.Header.Set("X-Keep-Storage-Classes", " , testclass2 , ")
-	resp = httptest.NewRecorder()
-	router.ServeHTTP(resp, req)
-	c.Check(resp.StatusCode, Equals, http.StatusOK)
-	c.Check(resp.Header.Get("X-Keep-Replicas-Stored"), Equals, "1")
-	c.Check(resp.Header.Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass2=1")
+	resp := call(router, "GET", "http://example/"+fooHash, arvadostest.ActiveTokenV2, nil, http.Header{"X-Arvados-Replicas-Desired": []string{"2"}})
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "2")
+	c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), Equals, "testclass1=1, testclass2=1")
+
+	resp = call(router, "GET", "http://example/"+fooHash, arvadostest.ActiveTokenV2, nil, http.Header{"X-Keep-Storage-Classes": []string{"testclass1"}})
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1")
+
+	resp = call(router, "GET", "http://example/"+fooHash, arvadostest.ActiveTokenV2, nil, http.Header{"X-Keep-Storage-Classes": []string{" , testclass2 , "}})
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass2=1")
+}
+
+func sortCommaSeparated(s string) string {
+	slice := strings.Split(s, ", ")
+	sort.Strings(slice)
+	return strings.Join(slice, ", ")
 }
 
 func (s *routerSuite) TestBadRequest(c *C) {
-	router, cancel := testRouter(c, s.cluster)
+	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
 	for _, trial := range []string{
@@ -142,12 +131,12 @@ func (s *routerSuite) TestBadRequest(c *C) {
 		req := httptest.NewRequest(methodpath[0], "http://example"+methodpath[1], nil)
 		resp := httptest.NewRecorder()
 		router.ServeHTTP(resp, req)
-		c.Check(resp.StatusCode, Equals, http.StatusBadRequest)
+		c.Check(resp.Code, Equals, http.StatusBadRequest)
 	}
 }
 
 func (s *routerSuite) TestRequireAdminMgtToken(c *C) {
-	router, cancel := testRouter(c, s.cluster)
+	router, cancel := testRouter(c, s.cluster, nil)
 	defer cancel()
 
 	for _, token := range []string{"badtoken", ""} {
@@ -168,13 +157,13 @@ func (s *routerSuite) TestRequireAdminMgtToken(c *C) {
 			}
 			resp := httptest.NewRecorder()
 			router.ServeHTTP(resp, req)
-			c.Check(resp.StatusCode, Equals, http.StatusUnauthorized)
+			c.Check(resp.Code, Equals, http.StatusUnauthorized)
 		}
 	}
 	req := httptest.NewRequest("TOUCH", "http://example/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", nil)
 	resp := httptest.NewRecorder()
 	router.ServeHTTP(resp, req)
-	c.Check(resp.StatusCode, Equals, http.StatusUnauthorized)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
 }
 
 func (s *routerSuite) TestVolumeErrorStatusCode(c *C) {
@@ -184,3 +173,19 @@ func (s *routerSuite) TestVolumeErrorStatusCode(c *C) {
 func (s *routerSuite) TestCancelOnDisconnect(c *C) {
 	c.Fatal("todo: volume operation context is cancelled when client disconnects")
 }
+
+func call(handler http.Handler, method, path, tok string, body []byte, hdr http.Header) *httptest.ResponseRecorder {
+	resp := httptest.NewRecorder()
+	req, err := http.NewRequest(method, path, bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	for k := range hdr {
+		req.Header.Set(k, hdr.Get(k))
+	}
+	if tok != "" {
+		req.Header.Set("Authorization", "Bearer "+tok)
+	}
+	handler.ServeHTTP(resp, req)
+	return resp
+}
diff --git a/services/keepstore/s3aws_volume.go b/services/keepstore/s3aws_volume.go
index 89fe060552..65dd087d50 100644
--- a/services/keepstore/s3aws_volume.go
+++ b/services/keepstore/s3aws_volume.go
@@ -112,7 +112,7 @@ func newS3AWSVolume(params newVolumeParams) (volume, error) {
 	if err != nil {
 		return nil, err
 	}
-	v.logger = params.Logger.WithField("Volume", v.String())
+	v.logger = params.Logger.WithField("Volume", v.DeviceID())
 	return v, v.check("")
 }
 
@@ -237,11 +237,6 @@ func (v *S3AWSVolume) check(ec2metadataHostname string) error {
 	return nil
 }
 
-// String implements fmt.Stringer.
-func (v *S3AWSVolume) String() string {
-	return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
-}
-
 // DeviceID returns a globally unique ID for the storage bucket.
 func (v *S3AWSVolume) DeviceID() string {
 	return "s3://" + v.Endpoint + "/" + v.Bucket
@@ -356,7 +351,7 @@ func (v *S3AWSVolume) EmptyTrash() {
 	if err := trashL.Error(); err != nil {
 		v.logger.WithError(err).Error("EmptyTrash: lister failed")
 	}
-	v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+	v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.DeviceID(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
 // fixRace(X) is called when "recent/X" exists but "X" doesn't
@@ -732,17 +727,6 @@ func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
 	return *resp.LastModified, err
 }
 
-// Status returns a *VolumeStatus representing the current in-use
-// storage capacity and a fake available capacity that doesn't make
-// the volume seem full or nearly-full.
-func (v *S3AWSVolume) Status() *VolumeStatus {
-	return &VolumeStatus{
-		DeviceNum: 1,
-		BytesFree: BlockSize * 1000,
-		BytesUsed: 1,
-	}
-}
-
 // InternalStats returns bucket I/O and API call counters.
 func (v *S3AWSVolume) InternalStats() interface{} {
 	return &v.bucket.stats
diff --git a/services/keepstore/s3aws_volume_test.go b/services/keepstore/s3aws_volume_test.go
index f322c5ec3b..60b8534993 100644
--- a/services/keepstore/s3aws_volume_test.go
+++ b/services/keepstore/s3aws_volume_test.go
@@ -111,7 +111,7 @@ func (s *StubbedS3AWSSuite) TestIndex(c *check.C) {
 		{"abc", 0},
 	} {
 		buf := new(bytes.Buffer)
-		err := v.IndexTo(spec.prefix, buf)
+		err := v.Index(context.Background(), spec.prefix, buf)
 		c.Check(err, check.IsNil)
 
 		idx := bytes.SplitAfter(buf.Bytes(), []byte{10})
@@ -146,7 +146,7 @@ func (s *StubbedS3AWSSuite) TestSignature(c *check.C) {
 	vol.bucket.svc.ForcePathStyle = true
 
 	c.Check(err, check.IsNil)
-	err = vol.Put(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
+	err = vol.BlockWrite(context.Background(), "acbd18db4cc2f85cedef654fccc4a4d8", []byte("foo"))
 	c.Check(err, check.IsNil)
 	c.Check(header.Get("Authorization"), check.Matches, `AWS4-HMAC-SHA256 .*`)
 }
@@ -212,20 +212,20 @@ func (s *StubbedS3AWSSuite) TestStats(c *check.C) {
 	c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
 
 	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-	_, err := v.Get(context.Background(), loc, make([]byte, 3))
+	err := v.BlockWrite(context.Background(), loc, make([]byte, 3))
 	c.Check(err, check.NotNil)
 	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
 	c.Check(stats(), check.Matches, `.*"s3.requestFailure 404 NoSuchKey[^"]*":[^0].*`)
 	c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
 
-	err = v.Put(context.Background(), loc, []byte("foo"))
+	err = v.BlockWrite(context.Background(), loc, []byte("foo"))
 	c.Check(err, check.IsNil)
 	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
 	c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
 
-	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	_, err = v.BlockRead(context.Background(), loc, io.Discard)
 	c.Check(err, check.IsNil)
-	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	_, err = v.BlockRead(context.Background(), loc, io.Discard)
 	c.Check(err, check.IsNil)
 	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
 }
@@ -251,30 +251,15 @@ func (h *s3AWSBlockingHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
 }
 
 func (s *StubbedS3AWSSuite) TestGetContextCancel(c *check.C) {
-	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-	buf := make([]byte, 3)
-
 	s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
-		_, err := v.Get(ctx, loc, buf)
+		_, err := v.BlockRead(ctx, fooHash, io.Discard)
 		return err
 	})
 }
 
-func (s *StubbedS3AWSSuite) TestCompareContextCancel(c *check.C) {
-	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-	buf := []byte("bar")
-
-	s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
-		return v.Compare(ctx, loc, buf)
-	})
-}
-
 func (s *StubbedS3AWSSuite) TestPutContextCancel(c *check.C) {
-	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-	buf := []byte("foo")
-
 	s.testContextCancel(c, func(ctx context.Context, v *TestableS3AWSVolume) error {
-		return v.Put(ctx, loc, buf)
+		return v.BlockWrite(ctx, fooHash, []byte("foo"))
 	})
 }
 
@@ -475,8 +460,7 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 
 			// Check canGet
 			loc, blk := setupScenario()
-			buf := make([]byte, len(blk))
-			_, err := v.Get(context.Background(), loc, buf)
+			_, err := v.BlockRead(context.Background(), loc, io.Discard)
 			c.Check(err == nil, check.Equals, scenario.canGet)
 			if err != nil {
 				c.Check(os.IsNotExist(err), check.Equals, true)
@@ -484,9 +468,9 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 
 			// Call Trash, then check canTrash and canGetAfterTrash
 			loc, _ = setupScenario()
-			err = v.Trash(loc)
+			err = v.BlockTrash(loc)
 			c.Check(err == nil, check.Equals, scenario.canTrash)
-			_, err = v.Get(context.Background(), loc, buf)
+			_, err = v.BlockRead(context.Background(), loc, io.Discard)
 			c.Check(err == nil, check.Equals, scenario.canGetAfterTrash)
 			if err != nil {
 				c.Check(os.IsNotExist(err), check.Equals, true)
@@ -494,14 +478,14 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 
 			// Call Untrash, then check canUntrash
 			loc, _ = setupScenario()
-			err = v.Untrash(loc)
+			err = v.BlockUntrash(loc)
 			c.Check(err == nil, check.Equals, scenario.canUntrash)
 			if scenario.dataT != none || scenario.trashT != none {
 				// In all scenarios where the data exists, we
 				// should be able to Get after Untrash --
 				// regardless of timestamps, errors, race
 				// conditions, etc.
-				_, err = v.Get(context.Background(), loc, buf)
+				_, err = v.BlockRead(context.Background(), loc, io.Discard)
 				c.Check(err, check.IsNil)
 			}
 
@@ -522,7 +506,7 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
 			// Check for current Mtime after Put (applies to all
 			// scenarios)
 			loc, blk = setupScenario()
-			err = v.Put(context.Background(), loc, blk)
+			err = v.BlockWrite(context.Background(), loc, blk)
 			c.Check(err, check.IsNil)
 			t, err := v.Mtime(loc)
 			c.Check(err, check.IsNil)
diff --git a/services/keepstore/status_test.go b/services/keepstore/status_test.go
index a617798ab5..32be1dc8e8 100644
--- a/services/keepstore/status_test.go
+++ b/services/keepstore/status_test.go
@@ -6,16 +6,18 @@ package keepstore
 
 import (
 	"encoding/json"
+	"net/http"
 )
 
 // We don't have isolated unit tests for /status.json yet, but we do
 // check (e.g., in pull_worker_test.go) that /status.json reports
 // specific statistics correctly at the appropriate times.
 
-// getStatusItem("foo","bar","baz") retrieves /status.json, decodes
-// the response body into resp, and returns resp["foo"]["bar"]["baz"].
-func getStatusItem(h *router, keys ...string) interface{} {
-	resp := IssueRequest(h, &RequestTester{"/status.json", "", "GET", nil, ""})
+// getStatusItem(h, "foo","bar","baz") retrieves /status.json from h,
+// decodes the response body into resp, and returns
+// resp["foo"]["bar"]["baz"].
+func getStatusItem(h http.Handler, keys ...string) interface{} {
+	resp := call(h, "GET", "/status.json", "", nil, nil)
 	var s interface{}
 	json.NewDecoder(resp.Body).Decode(&s)
 	for _, k := range keys {
diff --git a/services/keepstore/streamwriterat_test.go b/services/keepstore/streamwriterat_test.go
index 1769a9c205..924fce9b71 100644
--- a/services/keepstore/streamwriterat_test.go
+++ b/services/keepstore/streamwriterat_test.go
@@ -53,7 +53,7 @@ func (s *streamWriterAtSuite) TestPartSizes(c *C) {
 							if endpos > datasize {
 								endpos = datasize
 							}
-							swa.WriteAt(indata[pos:endpos], pos)
+							swa.WriteAt(indata[pos:endpos], int64(pos))
 						}()
 					}
 					go func() {
@@ -70,8 +70,8 @@ func (s *streamWriterAtSuite) TestPartSizes(c *C) {
 func (s *streamWriterAtSuite) TestOverflow(c *C) {
 	for offset := -1; offset < 2; offset++ {
 		buf := make([]byte, 50)
-		swa := newStreamWriterAt(bytes.NewBuffer(), 20, buf)
-		_, err := swa.WriteAt([]byte("foo"), len(buf)+offset)
+		swa := newStreamWriterAt(bytes.NewBuffer(nil), 20, buf)
+		_, err := swa.WriteAt([]byte("foo"), int64(len(buf)+offset))
 		c.Check(err, NotNil)
 		err = swa.Close()
 		c.Check(err, NotNil)
@@ -82,10 +82,10 @@ func (s *streamWriterAtSuite) TestIncompleteWrite(c *C) {
 	for _, partsize := range []int{20, 25} {
 		for _, bufsize := range []int{50, 55, 60} {
 			for offset := 0; offset < 3; offset++ {
-				swa := newStreamWriterAt(bytes.NewBuffer(), partsize, make([]byte, bufsize))
+				swa := newStreamWriterAt(bytes.NewBuffer(nil), partsize, make([]byte, bufsize))
 				_, err := swa.WriteAt(make([]byte, 1), 49)
 				c.Check(err, IsNil)
-				_, err = swa.WriteAt(make([]byte, 46), offset)
+				_, err = swa.WriteAt(make([]byte, 46), int64(offset))
 				c.Check(err, IsNil)
 				err = swa.Close()
 				c.Check(err, NotNil)
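
The two compile fixes above come down to standard-library signatures: bytes.NewBuffer takes a []byte argument (so an empty buffer is spelled bytes.NewBuffer(nil)), and io.WriterAt declares its offset as int64, which is why the int loop indexes now get explicit conversions. A minimal standalone sketch, separate from the keepstore code, showing both signatures in use:

    package main

    import (
        "bytes"
        "fmt"
        "os"
    )

    func main() {
        // bytes.NewBuffer requires a []byte; NewBuffer(nil) yields an empty buffer.
        buf := bytes.NewBuffer(nil)
        buf.WriteString("foo")
        fmt.Println(buf.String())

        // io.WriterAt.WriteAt takes an int64 offset (here via *os.File),
        // so an int index has to be converted explicitly.
        f, err := os.CreateTemp("", "writerat-demo")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())
        defer f.Close()
        pos := 4 // an int, like the loop variables in the test
        if _, err := f.WriteAt([]byte("bar"), int64(pos)); err != nil {
            panic(err)
        }
    }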
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index cee954def1..1d180fc60d 100644
--- a/services/keepstore/trash_worker.go
+++ b/services/keepstore/trash_worker.go
@@ -5,6 +5,7 @@
 package keepstore
 
 import (
+	"context"
 	"sync"
 	"time"
 
@@ -24,7 +25,7 @@ type trasher struct {
 	cond      *sync.Cond // lock guards todo accesses; cond broadcasts when todo becomes non-empty
 }
 
-func newTrasher(keepstore *keepstore, reg *prometheus.Registry) *trasher {
+func newTrasher(ctx context.Context, keepstore *keepstore, reg *prometheus.Registry) *trasher {
 	t := &trasher{
 		keepstore: keepstore,
 		cond:      sync.NewCond(&sync.Mutex{}),
@@ -47,7 +48,7 @@ func newTrasher(keepstore *keepstore, reg *prometheus.Registry) *trasher {
 		return t
 	}
 	for i := 0; i < keepstore.cluster.Collections.BlobTrashConcurrency; i++ {
-		go t.runWorker()
+		go t.runWorker(ctx)
 	}
 	return t
 }
@@ -59,7 +60,7 @@ func (t *trasher) SetTrashList(newlist []trashListItem) {
 	t.cond.Broadcast()
 }
 
-func (t *trasher) runWorker() {
+func (t *trasher) runWorker(ctx context.Context) {
 	var mntsAllowTrash []*mount
 	for _, mnt := range t.keepstore.mounts {
 		if mnt.AllowTrash {
@@ -70,11 +71,18 @@ func (t *trasher) runWorker() {
 		t.keepstore.logger.Info("not running trash worker because there are no writable or trashable volumes")
 		return
 	}
+	go func() {
+		<-ctx.Done()
+		t.cond.Broadcast()
+	}()
 	for {
 		t.cond.L.Lock()
-		for len(t.todo) == 0 {
+		for len(t.todo) == 0 && ctx.Err() == nil {
 			t.cond.Wait()
 		}
+		if ctx.Err() != nil {
+			return
+		}
 		item := t.todo[0]
 		t.todo = t.todo[1:]
 		t.cond.L.Unlock()
@@ -127,7 +135,7 @@ func (t *trasher) runWorker() {
 
 type trashEmptier struct{}
 
-func newTrashEmptier(ks *keepstore, reg *prometheus.Registry) *trashEmptier {
+func newTrashEmptier(ctx context.Context, ks *keepstore, reg *prometheus.Registry) *trashEmptier {
 	d := ks.cluster.Collections.BlobTrashCheckInterval.Duration()
 	if d <= 0 ||
 		!ks.cluster.Collections.BlobTrash ||
@@ -136,7 +144,13 @@ func newTrashEmptier(ks *keepstore, reg *prometheus.Registry) *trashEmptier {
 		return &trashEmptier{}
 	}
 	go func() {
-		for range time.NewTicker(d).C {
+		ticker := time.NewTicker(d)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-ticker.C:
+			}
 			for _, mnt := range ks.mounts {
 				if mnt.KeepMount.AllowTrash {
 					mnt.volume.EmptyTrash()
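
The shutdown plumbing added above follows a common Go pattern: a helper goroutine turns ctx.Done() into a cond.Broadcast() so workers blocked in cond.Wait() can observe cancellation, and periodic loops select between the ticker and ctx.Done(). A simplified standalone sketch of the same pattern; the names are illustrative and not taken from the keepstore code:

    package main

    import (
        "context"
        "sync"
        "time"
    )

    type worker struct {
        mtx  sync.Mutex
        cond *sync.Cond
        todo []string
    }

    func (w *worker) run(ctx context.Context) {
        // cond.Wait never returns on cancellation by itself, so wake any
        // waiters when the context is done.
        go func() {
            <-ctx.Done()
            w.cond.Broadcast()
        }()
        for {
            w.cond.L.Lock()
            for len(w.todo) == 0 && ctx.Err() == nil {
                w.cond.Wait()
            }
            if ctx.Err() != nil {
                w.cond.L.Unlock()
                return
            }
            item := w.todo[0]
            w.todo = w.todo[1:]
            w.cond.L.Unlock()
            _ = item // process the item here
        }
    }

    func periodic(ctx context.Context, d time.Duration, f func()) {
        ticker := time.NewTicker(d)
        defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                return
            case <-ticker.C:
            }
            f()
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        w := &worker{}
        w.cond = sync.NewCond(&w.mtx)
        go w.run(ctx)
        go periodic(ctx, time.Second, func() { /* e.g. empty trash on each mount */ })
        time.Sleep(10 * time.Millisecond)
        cancel() // both goroutines observe ctx.Done and exit
        time.Sleep(10 * time.Millisecond)
    }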
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index 1fa5cfc0b2..08b66d6dc5 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -42,7 +42,7 @@ func newDirectoryVolume(params newVolumeParams) (volume, error) {
 	if err != nil {
 		return nil, err
 	}
-	v.logger = v.logger.WithField("Volume", v.String())
+	v.logger = v.logger.WithField("Volume", v.DeviceID())
 	return v, v.check()
 }
 
@@ -293,39 +293,6 @@ func (v *UnixVolume) BlockWrite(ctx context.Context, hash string, data []byte) e
 	return nil
 }
 
-// Status returns a VolumeStatus struct describing the volume's
-// current state, or nil if an error occurs.
-func (v *UnixVolume) Status() *VolumeStatus {
-	fi, err := v.os.Stat(v.Root)
-	if err != nil {
-		v.logger.WithError(err).Error("stat failed")
-		return nil
-	}
-	// uint64() cast here supports GOOS=darwin where Dev is
-	// int32. If the device number is negative, the unsigned
-	// devnum won't be the real device number any more, but that's
-	// fine -- all we care about is getting the same number each
-	// time.
-	devnum := uint64(fi.Sys().(*syscall.Stat_t).Dev)
-
-	var fs syscall.Statfs_t
-	if err := syscall.Statfs(v.Root, &fs); err != nil {
-		v.logger.WithError(err).Error("statfs failed")
-		return nil
-	}
-	// These calculations match the way df calculates disk usage:
-	// "free" space is measured by fs.Bavail, but "used" space
-	// uses fs.Blocks - fs.Bfree.
-	free := fs.Bavail * uint64(fs.Bsize)
-	used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
-	return &VolumeStatus{
-		MountPoint: v.Root,
-		DeviceNum:  devnum,
-		BytesFree:  free,
-		BytesUsed:  used,
-	}
-}
-
 var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)
 var blockFileRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
@@ -521,7 +488,7 @@ func (v *UnixVolume) IsFull() (isFull bool) {
 	if avail, err := v.FreeDiskSpace(); err == nil {
 		isFull = avail < BlockSize
 	} else {
-		v.logger.WithError(err).Errorf("%s: FreeDiskSpace failed", v)
+		v.logger.WithError(err).Errorf("%s: FreeDiskSpace failed", v.DeviceID())
 		isFull = false
 	}
 
@@ -546,10 +513,6 @@ func (v *UnixVolume) FreeDiskSpace() (free uint64, err error) {
 	return
 }
 
-func (v *UnixVolume) String() string {
-	return fmt.Sprintf("[UnixVolume %s]", v.Root)
-}
-
 // InternalStats returns I/O and filesystem ops counters.
 func (v *UnixVolume) InternalStats() interface{} {
 	return &v.os.stats
diff --git a/services/keepstore/unix_volume_test.go b/services/keepstore/unix_volume_test.go
index 75d9b22de5..dee146acd6 100644
--- a/services/keepstore/unix_volume_test.go
+++ b/services/keepstore/unix_volume_test.go
@@ -36,7 +36,7 @@ func (v *TestableUnixVolume) PutRaw(locator string, data []byte) {
 		v.volume.ReadOnly = orig
 	}(v.volume.ReadOnly)
 	v.volume.ReadOnly = false
-	err := v.Put(context.Background(), locator, data)
+	err := v.BlockWrite(context.Background(), locator, data)
 	if err != nil {
 		v.t.Fatal(err)
 	}
@@ -132,15 +132,15 @@ func (s *UnixVolumeSuite) TestUnixVolumeHandlersWithGenericVolumeTests(c *check.
 func (s *UnixVolumeSuite) TestGetNotFound(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
 	defer v.Teardown()
-	v.Put(context.Background(), TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 
-	buf := make([]byte, BlockSize)
-	n, err := v.Get(context.Background(), TestHash2, buf)
+	buf := bytes.NewBuffer(nil)
+	_, err := v.BlockRead(context.Background(), TestHash2, buf)
 	switch {
 	case os.IsNotExist(err):
 		break
 	case err == nil:
-		c.Errorf("Read should have failed, returned %+q", buf[:n])
+		c.Errorf("Read should have failed, returned %+q", buf.Bytes())
 	default:
 		c.Errorf("Read expected ErrNotExist, got: %s", err)
 	}
@@ -150,7 +150,7 @@ func (s *UnixVolumeSuite) TestPut(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
 	defer v.Teardown()
 
-	err := v.Put(context.Background(), TestHash, TestBlock)
+	err := v.BlockWrite(context.Background(), TestHash, TestBlock)
 	if err != nil {
 		c.Error(err)
 	}
@@ -169,38 +169,10 @@ func (s *UnixVolumeSuite) TestPutBadVolume(c *check.C) {
 
 	err := os.RemoveAll(v.Root)
 	c.Assert(err, check.IsNil)
-	err = v.Put(context.Background(), TestHash, TestBlock)
+	err = v.BlockWrite(context.Background(), TestHash, TestBlock)
 	c.Check(err, check.IsNil)
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeReadonly(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{ReadOnly: true, Replication: 1}, s.metrics, false)
-	defer v.Teardown()
-
-	v.PutRaw(TestHash, TestBlock)
-
-	buf := make([]byte, BlockSize)
-	_, err := v.Get(context.Background(), TestHash, buf)
-	if err != nil {
-		c.Errorf("got err %v, expected nil", err)
-	}
-
-	err = v.Put(context.Background(), TestHash, TestBlock)
-	if err != MethodDisabledError {
-		c.Errorf("got err %v, expected MethodDisabledError", err)
-	}
-
-	err = v.Touch(TestHash)
-	if err != MethodDisabledError {
-		c.Errorf("got err %v, expected MethodDisabledError", err)
-	}
-
-	err = v.Trash(TestHash)
-	if err != MethodDisabledError {
-		c.Errorf("got err %v, expected MethodDisabledError", err)
-	}
-}
-
 func (s *UnixVolumeSuite) TestIsFull(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
 	defer v.Teardown()
@@ -221,31 +193,11 @@ func (s *UnixVolumeSuite) TestIsFull(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestNodeStatus(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
-	defer v.Teardown()
-
-	// Get node status and make a basic sanity check.
-	volinfo := v.Status()
-	if volinfo.MountPoint != v.Root {
-		c.Errorf("GetNodeStatus mount_point %s, expected %s", volinfo.MountPoint, v.Root)
-	}
-	if volinfo.DeviceNum == 0 {
-		c.Errorf("uninitialized device_num in %v", volinfo)
-	}
-	if volinfo.BytesFree == 0 {
-		c.Errorf("uninitialized bytes_free in %v", volinfo)
-	}
-	if volinfo.BytesUsed == 0 {
-		c.Errorf("uninitialized bytes_used in %v", volinfo)
-	}
-}
-
 func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerError(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
 	defer v.Teardown()
 
-	v.Put(context.Background(), TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	mockErr := errors.New("Mock error")
 	err := v.getFunc(context.Background(), v.blockPath(TestHash), func(rdr io.Reader) error {
 		return mockErr
@@ -276,7 +228,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
 	defer v.Teardown()
 
-	v.Put(context.Background(), TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 
 	mtx := NewMockMutex()
 	v.locker = mtx
@@ -307,39 +259,7 @@ func (s *UnixVolumeSuite) TestUnixVolumeGetFuncWorkerWaitsOnMutex(c *check.C) {
 	}
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeCompare(c *check.C) {
-	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
-	defer v.Teardown()
-
-	v.Put(context.Background(), TestHash, TestBlock)
-	err := v.Compare(context.Background(), TestHash, TestBlock)
-	if err != nil {
-		c.Errorf("Got err %q, expected nil", err)
-	}
-
-	err = v.Compare(context.Background(), TestHash, []byte("baddata"))
-	if err != CollisionError {
-		c.Errorf("Got err %q, expected %q", err, CollisionError)
-	}
-
-	v.Put(context.Background(), TestHash, []byte("baddata"))
-	err = v.Compare(context.Background(), TestHash, TestBlock)
-	if err != DiskHashError {
-		c.Errorf("Got err %q, expected %q", err, DiskHashError)
-	}
-
-	if os.Getuid() == 0 {
-		c.Log("skipping 'permission denied' check when running as root")
-	} else {
-		p := fmt.Sprintf("%s/%s/%s", v.Root, TestHash[:3], TestHash)
-		err = os.Chmod(p, 000)
-		c.Assert(err, check.IsNil)
-		err = v.Compare(context.Background(), TestHash, TestBlock)
-		c.Check(err, check.ErrorMatches, ".*permission denied.*")
-	}
-}
-
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelPut(c *check.C) {
+func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockWrite(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, true)
 	defer v.Teardown()
 	v.locker.Lock()
@@ -350,13 +270,13 @@ func (s *UnixVolumeSuite) TestUnixVolumeContextCancelPut(c *check.C) {
 		time.Sleep(50 * time.Millisecond)
 		v.locker.Unlock()
 	}()
-	err := v.Put(ctx, TestHash, TestBlock)
+	err := v.BlockWrite(ctx, TestHash, TestBlock)
 	if err != context.Canceled {
-		c.Errorf("Put() returned %s -- expected short read / canceled", err)
+		c.Errorf("BlockWrite() returned %s -- expected short read / canceled", err)
 	}
 }
 
-func (s *UnixVolumeSuite) TestUnixVolumeContextCancelGet(c *check.C) {
+func (s *UnixVolumeSuite) TestUnixVolumeContextCancelBlockRead(c *check.C) {
 	v := s.newTestableUnixVolume(c, s.cluster, arvados.Volume{Replication: 1}, s.metrics, false)
 	defer v.Teardown()
 	bpath := v.blockPath(TestHash)
@@ -372,10 +292,9 @@ func (s *UnixVolumeSuite) TestUnixVolumeContextCancelGet(c *check.C) {
 		time.Sleep(50 * time.Millisecond)
 		cancel()
 	}()
-	buf := make([]byte, len(TestBlock))
-	n, err := v.Get(ctx, TestHash, buf)
+	n, err := v.BlockRead(ctx, TestHash, io.Discard)
 	if n == len(TestBlock) || err != context.Canceled {
-		c.Errorf("Get() returned %d, %s -- expected short read / canceled", n, err)
+		c.Errorf("BlockRead() returned %d, %s -- expected short read / canceled", n, err)
 	}
 }
 
@@ -390,8 +309,7 @@ func (s *UnixVolumeSuite) TestStats(c *check.C) {
 	c.Check(stats(), check.Matches, `.*"StatOps":1,.*`) // (*UnixVolume)check() calls Stat() once
 	c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
 
-	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-	_, err := vol.Get(context.Background(), loc, make([]byte, 3))
+	_, err := vol.BlockRead(context.Background(), fooHash, io.Discard)
 	c.Check(err, check.NotNil)
 	c.Check(stats(), check.Matches, `.*"StatOps":[^0],.*`)
 	c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
@@ -400,27 +318,27 @@ func (s *UnixVolumeSuite) TestStats(c *check.C) {
 	c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
 	c.Check(stats(), check.Matches, `.*"CreateOps":0,.*`)
 
-	err = vol.Put(context.Background(), loc, []byte("foo"))
+	err = vol.BlockWrite(context.Background(), fooHash, []byte("foo"))
 	c.Check(err, check.IsNil)
 	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
 	c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
 	c.Check(stats(), check.Matches, `.*"OpenOps":0,.*`)
 	c.Check(stats(), check.Matches, `.*"UtimesOps":1,.*`)
 
-	err = vol.Touch(loc)
+	err = vol.BlockTouch(fooHash)
 	c.Check(err, check.IsNil)
 	c.Check(stats(), check.Matches, `.*"FlockOps":1,.*`)
 	c.Check(stats(), check.Matches, `.*"OpenOps":1,.*`)
 	c.Check(stats(), check.Matches, `.*"UtimesOps":2,.*`)
 
-	_, err = vol.Get(context.Background(), loc, make([]byte, 3))
-	c.Check(err, check.IsNil)
-	err = vol.Compare(context.Background(), loc, []byte("foo"))
+	buf := bytes.NewBuffer(nil)
+	_, err = vol.BlockRead(context.Background(), fooHash, buf)
 	c.Check(err, check.IsNil)
-	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
-	c.Check(stats(), check.Matches, `.*"OpenOps":3,.*`)
+	c.Check(buf.String(), check.Equals, "foo")
+	c.Check(stats(), check.Matches, `.*"InBytes":3,.*`)
+	c.Check(stats(), check.Matches, `.*"OpenOps":2,.*`)
 
-	err = vol.Trash(loc)
+	err = vol.BlockTrash(fooHash)
 	c.Check(err, check.IsNil)
 	c.Check(stats(), check.Matches, `.*"FlockOps":2,.*`)
 }
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index 16fd1207c2..41a0eba86f 100644
--- a/services/keepstore/volume.go
+++ b/services/keepstore/volume.go
@@ -29,6 +29,7 @@ type volume interface {
 type volumeDriver func(newVolumeParams) (volume, error)
 
 type newVolumeParams struct {
+	UUID         string
 	Cluster      *arvados.Cluster
 	ConfigVolume arvados.Volume
 	Logger       logrus.FieldLogger
@@ -36,14 +37,6 @@ type newVolumeParams struct {
 	BufferPool   *bufferPool
 }
 
-// VolumeStatus describes the current condition of a volume
-type VolumeStatus struct {
-	MountPoint string
-	DeviceNum  uint64
-	BytesFree  uint64
-	BytesUsed  uint64
-}
-
 // ioStats tracks I/O statistics for a volume or server
 type ioStats struct {
 	Errors     uint64
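
For readers tracking the volume API rename (Get/Put/Touch/Trash/Untrash/IndexTo becoming BlockRead/BlockWrite/BlockTouch/BlockTrash/BlockUntrash/Index), the call sites in the updated tests imply roughly the following method shapes. This is an approximation reconstructed from usage in this diff, not the authoritative interface in volume.go:

    package keepstoresketch

    import (
        "context"
        "io"
        "time"
    )

    // volumeSketch approximates the renamed volume API as used by the tests
    // above; the real definition is in services/keepstore/volume.go and may
    // differ in detail.
    type volumeSketch interface {
        // BlockRead streams the block with the given hash to dst and
        // returns the number of bytes written.
        BlockRead(ctx context.Context, hash string, dst io.Writer) (int, error)
        // BlockWrite stores data under the given hash.
        BlockWrite(ctx context.Context, hash string, data []byte) error
        BlockTouch(hash string) error
        BlockTrash(hash string) error
        BlockUntrash(hash string) error
        // Index writes an index of stored blocks matching prefix to w.
        Index(ctx context.Context, prefix string, w io.Writer) error
        Mtime(hash string) (time.Time, error)
        EmptyTrash()
        DeviceID() string
    }

The notable shift is that reads now stream to an io.Writer instead of filling a caller-supplied []byte, which is why the tests replace fixed BlockSize buffers with bytes.Buffer or io.Discard.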
diff --git a/services/keepstore/volume_generic_test.go b/services/keepstore/volume_generic_test.go
index 2180412431..ada8ce3035 100644
--- a/services/keepstore/volume_generic_test.go
+++ b/services/keepstore/volume_generic_test.go
@@ -9,6 +9,7 @@ import (
 	"context"
 	"crypto/md5"
 	"fmt"
+	"io"
 	"os"
 	"regexp"
 	"sort"
@@ -51,16 +52,6 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
 	s.testGet(t, factory)
 	s.testGetNoSuchBlock(t, factory)
 
-	s.testCompareNonexistent(t, factory)
-	s.testCompareSameContent(t, factory, TestHash, TestBlock)
-	s.testCompareSameContent(t, factory, EmptyHash, EmptyBlock)
-	s.testCompareWithCollision(t, factory, TestHash, TestBlock, []byte("baddata"))
-	s.testCompareWithCollision(t, factory, TestHash, TestBlock, EmptyBlock)
-	s.testCompareWithCollision(t, factory, EmptyHash, EmptyBlock, TestBlock)
-	s.testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, []byte("baddata"))
-	s.testCompareWithCorruptStoredData(t, factory, TestHash, TestBlock, EmptyBlock)
-	s.testCompareWithCorruptStoredData(t, factory, EmptyHash, EmptyBlock, []byte("baddata"))
-
 	if !readonly {
 		s.testPutBlockWithSameContent(t, factory, TestHash, TestBlock)
 		s.testPutBlockWithSameContent(t, factory, EmptyHash, EmptyBlock)
@@ -76,7 +67,7 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
 
 	s.testMtimeNoSuchBlock(t, factory)
 
-	s.testIndexTo(t, factory)
+	s.testIndex(t, factory)
 
 	if !readonly {
 		s.testDeleteNewBlock(t, factory)
@@ -84,12 +75,8 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
 	}
 	s.testDeleteNoSuchBlock(t, factory)
 
-	s.testStatus(t, factory)
-
 	s.testMetrics(t, readonly, factory)
 
-	s.testString(t, factory)
-
 	if readonly {
 		s.testUpdateReadOnly(t, factory)
 	}
@@ -97,12 +84,10 @@ func DoGenericVolumeTests(t TB, readonly bool, factory TestableVolumeFactory) {
 	s.testGetConcurrent(t, factory)
 	if !readonly {
 		s.testPutConcurrent(t, factory)
-
 		s.testPutFullBlock(t, factory)
+		s.testTrashUntrash(t, readonly, factory)
+		s.testTrashEmptyTrashUntrash(t, factory)
 	}
-
-	s.testTrashUntrash(t, readonly, factory)
-	s.testTrashEmptyTrashUntrash(t, factory)
 }
 
 type genericVolumeSuite struct {
@@ -131,16 +116,15 @@ func (s *genericVolumeSuite) testGet(t TB, factory TestableVolumeFactory) {
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	v.PutRaw(TestHash, TestBlock)
+	v.BlockWrite(context.Background(), fooHash, []byte("foo"))
 
-	buf := make([]byte, BlockSize)
-	n, err := v.Get(context.Background(), TestHash, buf)
+	buf := bytes.NewBuffer(nil)
+	_, err := v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
 		t.Fatal(err)
 	}
-
-	if bytes.Compare(buf[:n], TestBlock) != 0 {
-		t.Errorf("expected %s, got %s", string(TestBlock), string(buf))
+	if buf.String() != "foo" {
+		t.Errorf("expected %s, got %s", "foo", buf.String())
 	}
 }
 
@@ -151,75 +135,8 @@ func (s *genericVolumeSuite) testGetNoSuchBlock(t TB, factory TestableVolumeFact
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	buf := make([]byte, BlockSize)
-	if _, err := v.Get(context.Background(), TestHash2, buf); err == nil {
-		t.Errorf("Expected error while getting non-existing block %v", TestHash2)
-	}
-}
-
-// Compare() should return os.ErrNotExist if the block does not exist.
-// Otherwise, writing new data causes CompareAndTouch() to generate
-// error logs even though everything is working fine.
-func (s *genericVolumeSuite) testCompareNonexistent(t TB, factory TestableVolumeFactory) {
-	s.setup(t)
-	v := s.newVolume(t, factory)
-	defer v.Teardown()
-
-	err := v.Compare(context.Background(), TestHash, TestBlock)
-	if err != os.ErrNotExist {
-		t.Errorf("Got err %T %q, expected os.ErrNotExist", err, err)
-	}
-}
-
-// Put a test block and compare the locator with same content
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareSameContent(t TB, factory TestableVolumeFactory, testHash string, testData []byte) {
-	s.setup(t)
-	v := s.newVolume(t, factory)
-	defer v.Teardown()
-
-	v.PutRaw(testHash, testData)
-
-	// Compare the block locator with same content
-	err := v.Compare(context.Background(), testHash, testData)
-	if err != nil {
-		t.Errorf("Got err %q, expected nil", err)
-	}
-}
-
-// Test behavior of Compare() when stored data matches expected
-// checksum but differs from new data we need to store. Requires
-// testHash = md5(testDataA).
-//
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareWithCollision(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
-	s.setup(t)
-	v := s.newVolume(t, factory)
-	defer v.Teardown()
-
-	v.PutRaw(testHash, testDataA)
-
-	// Compare the block locator with different content; collision
-	err := v.Compare(context.Background(), TestHash, testDataB)
-	if err == nil {
-		t.Errorf("Got err nil, expected error due to collision")
-	}
-}
-
-// Test behavior of Compare() when stored data has become
-// corrupted. Requires testHash = md5(testDataA) != md5(testDataB).
-//
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testCompareWithCorruptStoredData(t TB, factory TestableVolumeFactory, testHash string, testDataA, testDataB []byte) {
-	s.setup(t)
-	v := s.newVolume(t, factory)
-	defer v.Teardown()
-
-	v.PutRaw(TestHash, testDataB)
-
-	err := v.Compare(context.Background(), testHash, testDataA)
-	if err == nil || err == CollisionError {
-		t.Errorf("Got err %+v, expected non-collision error", err)
+	if _, err := v.BlockRead(context.Background(), barHash, io.Discard); err == nil {
+		t.Errorf("Expected error while getting non-existing block %v", barHash)
 	}
 }
 
@@ -230,12 +147,12 @@ func (s *genericVolumeSuite) testPutBlockWithSameContent(t TB, factory TestableV
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	err := v.Put(context.Background(), testHash, testData)
+	err := v.BlockWrite(context.Background(), testHash, testData)
 	if err != nil {
 		t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
 	}
 
-	err = v.Put(context.Background(), testHash, testData)
+	err = v.BlockWrite(context.Background(), testHash, testData)
 	if err != nil {
 		t.Errorf("Got err putting block second time %q: %q, expected nil", TestBlock, err)
 	}
@@ -248,23 +165,23 @@ func (s *genericVolumeSuite) testPutBlockWithDifferentContent(t TB, factory Test
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	v.PutRaw(testHash, testDataA)
+	v.BlockWrite(context.Background(), testHash, testDataA)
 
-	putErr := v.Put(context.Background(), testHash, testDataB)
-	buf := make([]byte, BlockSize)
-	n, getErr := v.Get(context.Background(), testHash, buf)
+	putErr := v.BlockWrite(context.Background(), testHash, testDataB)
+	buf := bytes.NewBuffer(nil)
+	_, getErr := v.BlockRead(context.Background(), testHash, buf)
 	if putErr == nil {
 		// Put must not return a nil error unless it has
 		// overwritten the existing data.
-		if bytes.Compare(buf[:n], testDataB) != 0 {
-			t.Errorf("Put succeeded but Get returned %+q, expected %+q", buf[:n], testDataB)
+		if buf.String() != string(testDataB) {
+			t.Errorf("Put succeeded but Get returned %+q, expected %+q", buf, testDataB)
 		}
 	} else {
 		// It is permissible for Put to fail, but it must
 		// leave us with either the original data, the new
 		// data, or nothing at all.
-		if getErr == nil && bytes.Compare(buf[:n], testDataA) != 0 && bytes.Compare(buf[:n], testDataB) != 0 {
-			t.Errorf("Put failed but Get returned %+q, which is neither %+q nor %+q", buf[:n], testDataA, testDataB)
+		if getErr == nil && buf.String() != string(testDataA) && buf.String() != string(testDataB) {
+			t.Errorf("Put failed but Get returned %+q, which is neither %+q nor %+q", buf, testDataA, testDataB)
 		}
 	}
 }
@@ -276,46 +193,48 @@ func (s *genericVolumeSuite) testPutMultipleBlocks(t TB, factory TestableVolumeF
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	err := v.Put(context.Background(), TestHash, TestBlock)
+	err := v.BlockWrite(context.Background(), TestHash, TestBlock)
 	if err != nil {
 		t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
 	}
 
-	err = v.Put(context.Background(), TestHash2, TestBlock2)
+	err = v.BlockWrite(context.Background(), TestHash2, TestBlock2)
 	if err != nil {
 		t.Errorf("Got err putting block %q: %q, expected nil", TestBlock2, err)
 	}
 
-	err = v.Put(context.Background(), TestHash3, TestBlock3)
+	err = v.BlockWrite(context.Background(), TestHash3, TestBlock3)
 	if err != nil {
 		t.Errorf("Got err putting block %q: %q, expected nil", TestBlock3, err)
 	}
 
-	data := make([]byte, BlockSize)
-	n, err := v.Get(context.Background(), TestHash, data)
+	buf := bytes.NewBuffer(nil)
+	_, err = v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
 		t.Error(err)
 	} else {
-		if bytes.Compare(data[:n], TestBlock) != 0 {
-			t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock)
+		if bytes.Compare(buf.Bytes(), TestBlock) != 0 {
+			t.Errorf("Block present, but got %+q, expected %+q", buf, TestBlock)
 		}
 	}
 
-	n, err = v.Get(context.Background(), TestHash2, data)
+	buf.Reset()
+	_, err = v.BlockRead(context.Background(), TestHash2, buf)
 	if err != nil {
 		t.Error(err)
 	} else {
-		if bytes.Compare(data[:n], TestBlock2) != 0 {
-			t.Errorf("Block present, but got %+q, expected %+q", data[:n], TestBlock2)
+		if bytes.Compare(buf.Bytes(), TestBlock2) != 0 {
+			t.Errorf("Block present, but got %+q, expected %+q", buf, TestBlock2)
 		}
 	}
 
-	n, err = v.Get(context.Background(), TestHash3, data)
+	buf.Reset()
+	_, err = v.BlockRead(context.Background(), TestHash3, buf)
 	if err != nil {
 		t.Error(err)
 	} else {
-		if bytes.Compare(data[:n], TestBlock3) != 0 {
-			t.Errorf("Block present, but to %+q, expected %+q", data[:n], TestBlock3)
+		if bytes.Compare(buf.Bytes(), TestBlock3) != 0 {
+			t.Errorf("Block present, but got %+q, expected %+q", buf, TestBlock3)
 		}
 	}
 }
@@ -328,13 +247,13 @@ func (s *genericVolumeSuite) testPutAndTouch(t TB, factory TestableVolumeFactory
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+	if err := v.BlockWrite(context.Background(), TestHash, TestBlock); err != nil {
 		t.Error(err)
 	}
 
 	// We'll verify { t0 < threshold < t1 }, where t0 is the
-	// existing block's timestamp on disk before Put() and t1 is
-	// its timestamp after Put().
+	// existing block's timestamp on disk before BlockWrite() and t1 is
+	// its timestamp after BlockWrite().
 	threshold := time.Now().Add(-time.Second)
 
 	// Set the stored block's mtime far enough in the past that we
@@ -348,7 +267,7 @@ func (s *genericVolumeSuite) testPutAndTouch(t TB, factory TestableVolumeFactory
 	}
 
 	// Write the same block again.
-	if err := v.Put(context.Background(), TestHash, TestBlock); err != nil {
+	if err := v.BlockWrite(context.Background(), TestHash, TestBlock); err != nil {
 		t.Error(err)
 	}
 
@@ -367,7 +286,7 @@ func (s *genericVolumeSuite) testTouchNoSuchBlock(t TB, factory TestableVolumeFa
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	if err := v.Touch(TestHash); err == nil {
+	if err := v.BlockTouch(TestHash); err == nil {
 		t.Error("Expected error when attempted to touch a non-existing block")
 	}
 }
@@ -384,12 +303,12 @@ func (s *genericVolumeSuite) testMtimeNoSuchBlock(t TB, factory TestableVolumeFa
 	}
 }
 
-// Put a few blocks and invoke IndexTo with:
+// Put a few blocks and invoke Index with:
 // * no prefix
 // * with a prefix
 // * with no such prefix
 // Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
+func (s *genericVolumeSuite) testIndex(t TB, factory TestableVolumeFactory) {
 	s.setup(t)
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
@@ -400,9 +319,9 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
 	minMtime := time.Now().UTC().UnixNano()
 	minMtime -= minMtime % 1e9
 
-	v.PutRaw(TestHash, TestBlock)
-	v.PutRaw(TestHash2, TestBlock2)
-	v.PutRaw(TestHash3, TestBlock3)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash2, TestBlock2)
+	v.BlockWrite(context.Background(), TestHash3, TestBlock3)
 
 	maxMtime := time.Now().UTC().UnixNano()
 	if maxMtime%1e9 > 0 {
@@ -412,13 +331,13 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
 
 	// Blocks whose names aren't Keep hashes should be omitted from
 	// index
-	v.PutRaw("fffffffffnotreallyahashfffffffff", nil)
-	v.PutRaw("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", nil)
-	v.PutRaw("f0000000000000000000000000000000f", nil)
-	v.PutRaw("f00", nil)
+	v.BlockWrite(context.Background(), "fffffffffnotreallyahashfffffffff", nil)
+	v.BlockWrite(context.Background(), "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", nil)
+	v.BlockWrite(context.Background(), "f0000000000000000000000000000000f", nil)
+	v.BlockWrite(context.Background(), "f00", nil)
 
 	buf := new(bytes.Buffer)
-	v.IndexTo("", buf)
+	v.Index(context.Background(), "", buf)
 	indexRows := strings.Split(string(buf.Bytes()), "\n")
 	sort.Strings(indexRows)
 	sortedIndex := strings.Join(indexRows, "\n")
@@ -441,7 +360,7 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
 
 	for _, prefix := range []string{"f", "f15", "f15ac"} {
 		buf = new(bytes.Buffer)
-		v.IndexTo(prefix, buf)
+		v.Index(context.Background(), prefix, buf)
 
 		m, err := regexp.MatchString(`^`+TestHash2+`\+\d+ \d+\n$`, string(buf.Bytes()))
 		if err != nil {
@@ -453,11 +372,11 @@ func (s *genericVolumeSuite) testIndexTo(t TB, factory TestableVolumeFactory) {
 
 	for _, prefix := range []string{"zero", "zip", "zilch"} {
 		buf = new(bytes.Buffer)
-		err := v.IndexTo(prefix, buf)
+		err := v.Index(context.Background(), prefix, buf)
 		if err != nil {
-			t.Errorf("Got error on IndexTo with no such prefix %v", err.Error())
+			t.Errorf("Got error on Index with no such prefix %v", err.Error())
 		} else if buf.Len() != 0 {
-			t.Errorf("Expected empty list for IndexTo with no such prefix %s", prefix)
+			t.Errorf("Expected empty list for Index with no such prefix %s", prefix)
 		}
 	}
 }
@@ -471,17 +390,17 @@ func (s *genericVolumeSuite) testDeleteNewBlock(t TB, factory TestableVolumeFact
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	v.Put(context.Background(), TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 
-	if err := v.Trash(TestHash); err != nil {
+	if err := v.BlockTrash(TestHash); err != nil {
 		t.Error(err)
 	}
-	data := make([]byte, BlockSize)
-	n, err := v.Get(context.Background(), TestHash, data)
+	buf := bytes.NewBuffer(nil)
+	_, err := v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
 		t.Error(err)
-	} else if bytes.Compare(data[:n], TestBlock) != 0 {
-		t.Errorf("Got data %+q, expected %+q", data[:n], TestBlock)
+	} else if buf.String() != string(TestBlock) {
+		t.Errorf("Got data %+q, expected %+q", buf.String(), TestBlock)
 	}
 }
 
@@ -494,14 +413,13 @@ func (s *genericVolumeSuite) testDeleteOldBlock(t TB, factory TestableVolumeFact
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	v.Put(context.Background(), TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
-	if err := v.Trash(TestHash); err != nil {
+	if err := v.BlockTrash(TestHash); err != nil {
 		t.Error(err)
 	}
-	data := make([]byte, BlockSize)
-	if _, err := v.Get(context.Background(), TestHash, data); err == nil || !os.IsNotExist(err) {
+	if _, err := v.BlockRead(context.Background(), TestHash, io.Discard); err == nil || !os.IsNotExist(err) {
 		t.Errorf("os.IsNotExist(%v) should have been true", err)
 	}
 
@@ -510,18 +428,13 @@ func (s *genericVolumeSuite) testDeleteOldBlock(t TB, factory TestableVolumeFact
 		t.Fatalf("os.IsNotExist(%v) should have been true", err)
 	}
 
-	err = v.Compare(context.Background(), TestHash, TestBlock)
-	if err == nil || !os.IsNotExist(err) {
-		t.Fatalf("os.IsNotExist(%v) should have been true", err)
-	}
-
 	indexBuf := new(bytes.Buffer)
-	v.IndexTo("", indexBuf)
+	v.Index(context.Background(), "", indexBuf)
 	if strings.Contains(string(indexBuf.Bytes()), TestHash) {
-		t.Fatalf("Found trashed block in IndexTo")
+		t.Fatalf("Found trashed block in Index")
 	}
 
-	err = v.Touch(TestHash)
+	err = v.BlockTouch(TestHash)
 	if err == nil || !os.IsNotExist(err) {
 		t.Fatalf("os.IsNotExist(%v) should have been true", err)
 	}
@@ -534,33 +447,11 @@ func (s *genericVolumeSuite) testDeleteNoSuchBlock(t TB, factory TestableVolumeF
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	if err := v.Trash(TestHash2); err == nil {
+	if err := v.BlockTrash(TestHash2); err == nil {
 		t.Errorf("Expected error when attempting to delete a non-existing block")
 	}
 }
 
-// Invoke Status and verify that VolumeStatus is returned
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testStatus(t TB, factory TestableVolumeFactory) {
-	s.setup(t)
-	v := s.newVolume(t, factory)
-	defer v.Teardown()
-
-	// Get node status and make a basic sanity check.
-	status := v.Status()
-	if status.DeviceNum == 0 {
-		t.Errorf("uninitialized device_num in %v", status)
-	}
-
-	if status.BytesFree == 0 {
-		t.Errorf("uninitialized bytes_free in %v", status)
-	}
-
-	if status.BytesUsed == 0 {
-		t.Errorf("uninitialized bytes_used in %v", status)
-	}
-}
-
 func getValueFrom(cv *prometheus.CounterVec, lbls prometheus.Labels) float64 {
 	c, _ := cv.GetMetricWith(lbls)
 	pb := &dto.Metric{}
@@ -575,7 +466,7 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	opsC, _, ioC := s.metrics.getCounterVecsFor(prometheus.Labels{"device_id": v.GetDeviceID()})
+	opsC, _, ioC := s.metrics.getCounterVecsFor(prometheus.Labels{"device_id": v.DeviceID()})
 
 	if ioC == nil {
 		t.Error("ioBytes CounterVec is nil")
@@ -600,7 +491,7 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
 
 	// Test Put if volume is writable
 	if !readonly {
-		err = v.Put(context.Background(), TestHash, TestBlock)
+		err = v.BlockWrite(context.Background(), TestHash, TestBlock)
 		if err != nil {
 			t.Errorf("Got err putting block %q: %q, expected nil", TestBlock, err)
 		}
@@ -614,11 +505,10 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
 			t.Error("ioBytes{direction=out} counter shouldn't be zero")
 		}
 	} else {
-		v.PutRaw(TestHash, TestBlock)
+		v.BlockWrite(context.Background(), TestHash, TestBlock)
 	}
 
-	buf := make([]byte, BlockSize)
-	_, err = v.Get(context.Background(), TestHash, buf)
+	_, err = v.BlockRead(context.Background(), TestHash, io.Discard)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -634,18 +524,6 @@ func (s *genericVolumeSuite) testMetrics(t TB, readonly bool, factory TestableVo
 	}
 }
 
-// Invoke String for the volume; expect non-empty result
-// Test should pass for both writable and read-only volumes
-func (s *genericVolumeSuite) testString(t TB, factory TestableVolumeFactory) {
-	s.setup(t)
-	v := s.newVolume(t, factory)
-	defer v.Teardown()
-
-	if id := v.String(); len(id) == 0 {
-		t.Error("Got empty string for v.String()")
-	}
-}
-
 // Putting, updating, touching, and deleting blocks from a read-only volume result in error.
 // Test is intended for only read-only volumes
 func (s *genericVolumeSuite) testUpdateReadOnly(t TB, factory TestableVolumeFactory) {
@@ -653,39 +531,38 @@ func (s *genericVolumeSuite) testUpdateReadOnly(t TB, factory TestableVolumeFact
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	v.PutRaw(TestHash, TestBlock)
-	buf := make([]byte, BlockSize)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 
 	// Get from read-only volume should succeed
-	_, err := v.Get(context.Background(), TestHash, buf)
+	_, err := v.BlockRead(context.Background(), TestHash, io.Discard)
 	if err != nil {
 		t.Errorf("got err %v, expected nil", err)
 	}
 
 	// Put a new block to read-only volume should result in error
-	err = v.Put(context.Background(), TestHash2, TestBlock2)
+	err = v.BlockWrite(context.Background(), TestHash2, TestBlock2)
 	if err == nil {
 		t.Errorf("Expected error when putting block in a read-only volume")
 	}
-	_, err = v.Get(context.Background(), TestHash2, buf)
+	_, err = v.BlockRead(context.Background(), TestHash2, io.Discard)
 	if err == nil {
 		t.Errorf("Expected error when getting block whose put in read-only volume failed")
 	}
 
 	// Touch a block in read-only volume should result in error
-	err = v.Touch(TestHash)
+	err = v.BlockTouch(TestHash)
 	if err == nil {
 		t.Errorf("Expected error when touching block in a read-only volume")
 	}
 
 	// Delete a block from a read-only volume should result in error
-	err = v.Trash(TestHash)
+	err = v.BlockTrash(TestHash)
 	if err == nil {
 		t.Errorf("Expected error when deleting block from a read-only volume")
 	}
 
 	// Overwriting an existing block in read-only volume should result in error
-	err = v.Put(context.Background(), TestHash, TestBlock)
+	err = v.BlockWrite(context.Background(), TestHash, TestBlock)
 	if err == nil {
 		t.Errorf("Expected error when putting block in a read-only volume")
 	}
@@ -698,43 +575,43 @@ func (s *genericVolumeSuite) testGetConcurrent(t TB, factory TestableVolumeFacto
 	v := s.newVolume(t, factory)
 	defer v.Teardown()
 
-	v.PutRaw(TestHash, TestBlock)
-	v.PutRaw(TestHash2, TestBlock2)
-	v.PutRaw(TestHash3, TestBlock3)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash2, TestBlock2)
+	v.BlockWrite(context.Background(), TestHash3, TestBlock3)
 
 	sem := make(chan int)
 	go func() {
-		buf := make([]byte, BlockSize)
-		n, err := v.Get(context.Background(), TestHash, buf)
+		buf := bytes.NewBuffer(nil)
+		_, err := v.BlockRead(context.Background(), TestHash, buf)
 		if err != nil {
 			t.Errorf("err1: %v", err)
 		}
-		if bytes.Compare(buf[:n], TestBlock) != 0 {
-			t.Errorf("buf should be %s, is %s", string(TestBlock), string(buf[:n]))
+		if buf.String() != string(TestBlock) {
+			t.Errorf("buf should be %s, is %s", TestBlock, buf)
 		}
 		sem <- 1
 	}()
 
 	go func() {
-		buf := make([]byte, BlockSize)
-		n, err := v.Get(context.Background(), TestHash2, buf)
+		buf := bytes.NewBuffer(nil)
+		_, err := v.BlockRead(context.Background(), TestHash2, buf)
 		if err != nil {
 			t.Errorf("err2: %v", err)
 		}
-		if bytes.Compare(buf[:n], TestBlock2) != 0 {
-			t.Errorf("buf should be %s, is %s", string(TestBlock2), string(buf[:n]))
+		if buf.String() != string(TestBlock2) {
+			t.Errorf("buf should be %s, is %s", TestBlock2, buf)
 		}
 		sem <- 1
 	}()
 
 	go func() {
-		buf := make([]byte, BlockSize)
-		n, err := v.Get(context.Background(), TestHash3, buf)
+		buf := bytes.NewBuffer(nil)
+		_, err := v.BlockRead(context.Background(), TestHash3, buf)
 		if err != nil {
 			t.Errorf("err3: %v", err)
 		}
-		if bytes.Compare(buf[:n], TestBlock3) != 0 {
-			t.Errorf("buf should be %s, is %s", string(TestBlock3), string(buf[:n]))
+		if buf.String() != string(TestBlock3) {
+			t.Errorf("buf should be %s, is %s", TestBlock3, buf)
 		}
 		sem <- 1
 	}()
@@ -754,7 +631,7 @@ func (s *genericVolumeSuite) testPutConcurrent(t TB, factory TestableVolumeFacto
 
 	sem := make(chan int)
 	go func(sem chan int) {
-		err := v.Put(context.Background(), TestHash, TestBlock)
+		err := v.BlockWrite(context.Background(), TestHash, TestBlock)
 		if err != nil {
 			t.Errorf("err1: %v", err)
 		}
@@ -762,7 +639,7 @@ func (s *genericVolumeSuite) testPutConcurrent(t TB, factory TestableVolumeFacto
 	}(sem)
 
 	go func(sem chan int) {
-		err := v.Put(context.Background(), TestHash2, TestBlock2)
+		err := v.BlockWrite(context.Background(), TestHash2, TestBlock2)
 		if err != nil {
 			t.Errorf("err2: %v", err)
 		}
@@ -770,7 +647,7 @@ func (s *genericVolumeSuite) testPutConcurrent(t TB, factory TestableVolumeFacto
 	}(sem)
 
 	go func(sem chan int) {
-		err := v.Put(context.Background(), TestHash3, TestBlock3)
+		err := v.BlockWrite(context.Background(), TestHash3, TestBlock3)
 		if err != nil {
 			t.Errorf("err3: %v", err)
 		}
@@ -783,29 +660,28 @@ func (s *genericVolumeSuite) testPutConcurrent(t TB, factory TestableVolumeFacto
 	}
 
 	// Double check that we actually wrote the blocks we expected to write.
-	buf := make([]byte, BlockSize)
-	n, err := v.Get(context.Background(), TestHash, buf)
+	buf := bytes.NewBuffer(nil)
+	_, err := v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
 		t.Errorf("Get #1: %v", err)
-	}
-	if bytes.Compare(buf[:n], TestBlock) != 0 {
-		t.Errorf("Get #1: expected %s, got %s", string(TestBlock), string(buf[:n]))
+	} else if buf.String() != string(TestBlock) {
+		t.Errorf("Get #1: expected %s, got %s", TestBlock, buf)
 	}
 
-	n, err = v.Get(context.Background(), TestHash2, buf)
+	buf.Reset()
+	_, err = v.BlockRead(context.Background(), TestHash2, buf)
 	if err != nil {
 		t.Errorf("Get #2: %v", err)
-	}
-	if bytes.Compare(buf[:n], TestBlock2) != 0 {
-		t.Errorf("Get #2: expected %s, got %s", string(TestBlock2), string(buf[:n]))
+	} else if buf.String() != string(TestBlock2) {
+		t.Errorf("Get #2: expected %s, got %s", TestBlock2, buf)
 	}
 
-	n, err = v.Get(context.Background(), TestHash3, buf)
+	buf.Reset()
+	_, err = v.BlockRead(context.Background(), TestHash3, buf)
 	if err != nil {
 		t.Errorf("Get #3: %v", err)
-	}
-	if bytes.Compare(buf[:n], TestBlock3) != 0 {
-		t.Errorf("Get #3: expected %s, got %s", string(TestBlock3), string(buf[:n]))
+	} else if buf.String() != string(TestBlock3) {
+		t.Errorf("Get #3: expected %s, got %s", TestBlock3, buf)
 	}
 }
 
@@ -819,17 +695,18 @@ func (s *genericVolumeSuite) testPutFullBlock(t TB, factory TestableVolumeFactor
 	wdata[0] = 'a'
 	wdata[BlockSize-1] = 'z'
 	hash := fmt.Sprintf("%x", md5.Sum(wdata))
-	err := v.Put(context.Background(), hash, wdata)
+	err := v.BlockWrite(context.Background(), hash, wdata)
 	if err != nil {
 		t.Fatal(err)
 	}
-	buf := make([]byte, BlockSize)
-	n, err := v.Get(context.Background(), hash, buf)
+
+	buf := bytes.NewBuffer(nil)
+	_, err = v.BlockRead(context.Background(), hash, buf)
 	if err != nil {
 		t.Error(err)
 	}
-	if bytes.Compare(buf[:n], wdata) != 0 {
-		t.Error("buf %+q != wdata %+q", buf[:n], wdata)
+	if buf.String() != string(wdata) {
+		t.Errorf("buf %+q != wdata %+q", buf, wdata)
 	}
 }
 
@@ -844,48 +721,42 @@ func (s *genericVolumeSuite) testTrashUntrash(t TB, readonly bool, factory Testa
 	defer v.Teardown()
 
 	// put block and backdate it
-	v.PutRaw(TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
-	buf := make([]byte, BlockSize)
-	n, err := v.Get(context.Background(), TestHash, buf)
+	buf := bytes.NewBuffer(nil)
+	_, err := v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if bytes.Compare(buf[:n], TestBlock) != 0 {
-		t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+	if buf.String() != string(TestBlock) {
+		t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
 	}
 
 	// Trash
-	err = v.Trash(TestHash)
-	if readonly {
-		if err != MethodDisabledError {
-			t.Fatal(err)
-		}
-	} else if err != nil {
-		if err != ErrNotImplemented {
-			t.Fatal(err)
-		}
-	} else {
-		_, err = v.Get(context.Background(), TestHash, buf)
-		if err == nil || !os.IsNotExist(err) {
-			t.Errorf("os.IsNotExist(%v) should have been true", err)
-		}
+	err = v.BlockTrash(TestHash)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	_, err = v.BlockRead(context.Background(), TestHash, buf)
+	if err == nil || !os.IsNotExist(err) {
+		t.Errorf("os.IsNotExist(%v) should have been true", err)
+	}
 
-		// Untrash
-		err = v.Untrash(TestHash)
-		if err != nil {
-			t.Fatal(err)
-		}
+	// Untrash
+	err = v.BlockUntrash(TestHash)
+	if err != nil {
+		t.Fatal(err)
 	}
 
 	// Get the block - after trash and untrash sequence
-	n, err = v.Get(context.Background(), TestHash, buf)
+	_, err = v.BlockRead(context.Background(), TestHash, buf)
 	if err != nil {
 		t.Fatal(err)
 	}
-	if bytes.Compare(buf[:n], TestBlock) != 0 {
-		t.Errorf("Got data %+q, expected %+q", buf[:n], TestBlock)
+	if buf.String() != string(TestBlock) {
+		t.Errorf("Got data %+q, expected %+q", buf, TestBlock)
 	}
 }
 
@@ -895,13 +766,13 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	defer v.Teardown()
 
 	checkGet := func() error {
-		buf := make([]byte, BlockSize)
-		n, err := v.Get(context.Background(), TestHash, buf)
+		buf := bytes.NewBuffer(nil)
+		_, err := v.BlockRead(context.Background(), TestHash, buf)
 		if err != nil {
 			return err
 		}
-		if bytes.Compare(buf[:n], TestBlock) != 0 {
-			t.Fatalf("Got data %+q, expected %+q", buf[:n], TestBlock)
+		if buf.String() != string(TestBlock) {
+			t.Fatalf("Got data %+q, expected %+q", buf, TestBlock)
 		}
 
 		_, err = v.Mtime(TestHash)
@@ -909,13 +780,8 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 			return err
 		}
 
-		err = v.Compare(context.Background(), TestHash, TestBlock)
-		if err != nil {
-			return err
-		}
-
 		indexBuf := new(bytes.Buffer)
-		v.IndexTo("", indexBuf)
+		v.Index(context.Background(), "", indexBuf)
 		if !strings.Contains(string(indexBuf.Bytes()), TestHash) {
 			return os.ErrNotExist
 		}
@@ -927,7 +793,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
 	s.cluster.Collections.BlobTrashLifetime.Set("1h")
 
-	v.PutRaw(TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
 	err := checkGet()
@@ -936,12 +802,9 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	}
 
 	// Trash the block
-	err = v.Trash(TestHash)
-	if err == MethodDisabledError || err == ErrNotImplemented {
-		// Skip the trash tests for read-only volumes, and
-		// volume types that don't support
-		// BlobTrashLifetime>0.
-		return
+	err = v.BlockTrash(TestHash)
+	if err != nil {
+		t.Fatal(err)
 	}
 
 	err = checkGet()
@@ -949,7 +812,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 		t.Fatalf("os.IsNotExist(%v) should have been true", err)
 	}
 
-	err = v.Touch(TestHash)
+	err = v.BlockTouch(TestHash)
 	if err == nil || !os.IsNotExist(err) {
 		t.Fatalf("os.IsNotExist(%v) should have been true", err)
 	}
@@ -958,7 +821,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
 	// Even after emptying the trash, we can untrash our block
 	// because the deadline hasn't been reached.
-	err = v.Untrash(TestHash)
+	err = v.BlockUntrash(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -968,7 +831,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 		t.Fatal(err)
 	}
 
-	err = v.Touch(TestHash)
+	err = v.BlockTouch(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -979,7 +842,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	// If the only block in the trash has already been untrashed,
 	// most volumes will fail a subsequent Untrash with a 404, but
 	// it's also acceptable for Untrash to succeed.
-	err = v.Untrash(TestHash)
+	err = v.BlockUntrash(TestHash)
 	if err != nil && !os.IsNotExist(err) {
 		t.Fatalf("Expected success or os.IsNotExist(), but got: %v", err)
 	}
@@ -998,7 +861,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
 	s.cluster.Collections.BlobTrashLifetime.Set("1ns")
 
-	err = v.Trash(TestHash)
+	err = v.BlockTrash(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1009,7 +872,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 
 	// Even though 1ns has passed, we can untrash because we
 	// haven't called EmptyTrash yet.
-	err = v.Untrash(TestHash)
+	err = v.BlockUntrash(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1022,7 +885,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	// goes away.
 	// (In Azure volumes, un/trash changes Mtime, so first backdate again)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
-	_ = v.Trash(TestHash)
+	_ = v.BlockTrash(TestHash)
 	err = checkGet()
 	if err == nil || !os.IsNotExist(err) {
 		t.Fatalf("os.IsNotExist(%v) should have been true", err)
@@ -1030,7 +893,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	v.EmptyTrash()
 
 	// Untrash won't find it
-	err = v.Untrash(TestHash)
+	err = v.BlockUntrash(TestHash)
 	if err == nil || !os.IsNotExist(err) {
 		t.Fatalf("os.IsNotExist(%v) should have been true", err)
 	}
@@ -1045,11 +908,11 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	// being trashed, and then the trash gets emptied, the newer
 	// un-trashed copy doesn't get deleted along with it.
 
-	v.PutRaw(TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
 	s.cluster.Collections.BlobTrashLifetime.Set("1ns")
-	err = v.Trash(TestHash)
+	err = v.BlockTrash(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1058,7 +921,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 		t.Fatalf("os.IsNotExist(%v) should have been true", err)
 	}
 
-	v.PutRaw(TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
 	// EmptyTrash should not delete the untrashed copy.
@@ -1073,20 +936,20 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	// at intermediate time B (A < B < C), it is still possible to
 	// untrash the block whose deadline is "C".
 
-	v.PutRaw(TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
 	s.cluster.Collections.BlobTrashLifetime.Set("1ns")
-	err = v.Trash(TestHash)
+	err = v.BlockTrash(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	v.PutRaw(TestHash, TestBlock)
+	v.BlockWrite(context.Background(), TestHash, TestBlock)
 	v.TouchWithDate(TestHash, time.Now().Add(-2*s.cluster.Collections.BlobSigningTTL.Duration()))
 
 	s.cluster.Collections.BlobTrashLifetime.Set("1h")
-	err = v.Trash(TestHash)
+	err = v.BlockTrash(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -1094,7 +957,7 @@ func (s *genericVolumeSuite) testTrashEmptyTrashUntrash(t TB, factory TestableVo
 	// EmptyTrash should not prevent us from recovering the
 	// time.Hour ("C") trash
 	v.EmptyTrash()
-	err = v.Untrash(TestHash)
+	err = v.BlockUntrash(TestHash)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/services/keepstore/volume_test.go b/services/keepstore/volume_test.go
index 9162e4b057..13ce7afc73 100644
--- a/services/keepstore/volume_test.go
+++ b/services/keepstore/volume_test.go
@@ -5,16 +5,7 @@
 package keepstore
 
 import (
-	"context"
-	"fmt"
-	"io"
-	"os"
-	"strings"
-	"sync"
 	"time"
-
-	"git.arvados.org/arvados.git/sdk/go/arvados"
-	"github.com/sirupsen/logrus"
 )
 
 var (
@@ -42,10 +33,6 @@ var (
 type TestableVolume interface {
 	volume
 
-	// [Over]write content for a locator with the given data,
-	// bypassing all constraints like readonly and serialize.
-	BlockWriteRaw(locator string, data []byte)
-
 	// Returns the strings that a driver uses to record read/write operations.
 	ReadWriteOperationLabelValues() (r, w string)
 
@@ -56,206 +43,3 @@ type TestableVolume interface {
 	// Clean up, delete temporary files.
 	Teardown()
 }
-
-func init() {
-	driver["mock"] = newMockVolume
-}
-
-// MockVolumes are test doubles for Volumes, used to test handlers.
-type MockVolume struct {
-	Store      map[string][]byte
-	Timestamps map[string]time.Time
-
-	// If non-nil, all operations (except status) return Err.
-	Err error
-
-	// Touchable volumes' Touch() method succeeds for a locator
-	// that has been BlockWrite().
-	Touchable bool
-
-	// Gate is a "starting gate", allowing test cases to pause
-	// volume operations long enough to inspect state. Every
-	// operation (except Status) starts by receiving from
-	// Gate. Sending one value unblocks one operation; closing the
-	// channel unblocks all operations. By default, Gate is a
-	// closed channel, so all operations proceed without
-	// blocking. See trash_worker_test.go for an example.
-	Gate chan struct{} `json:"-"`
-
-	cluster *arvados.Cluster
-	volume  arvados.Volume
-	logger  logrus.FieldLogger
-	metrics *volumeMetricsVecs
-	called  map[string]int
-	mutex   sync.Mutex
-}
-
-// newMockVolume returns a non-Bad, non-Readonly, Touchable mock
-// volume.
-func newMockVolume(params newVolumeParams) (volume, error) {
-	gate := make(chan struct{})
-	close(gate)
-	return &MockVolume{
-		Store:      make(map[string][]byte),
-		Timestamps: make(map[string]time.Time),
-		Touchable:  true,
-		called:     map[string]int{},
-		Gate:       gate,
-		cluster:    params.Cluster,
-		volume:     params.ConfigVolume,
-		logger:     params.Logger,
-		metrics:    params.MetricsVecs,
-	}, nil
-}
-
-// CallCount returns how many times the named method has been called.
-func (v *MockVolume) CallCount(method string) int {
-	v.mutex.Lock()
-	defer v.mutex.Unlock()
-	c, ok := v.called[method]
-	if !ok {
-		return 0
-	}
-	return c
-}
-
-func (v *MockVolume) gotCall(method string) {
-	v.mutex.Lock()
-	defer v.mutex.Unlock()
-	if _, ok := v.called[method]; !ok {
-		v.called[method] = 1
-	} else {
-		v.called[method]++
-	}
-}
-
-func (v *MockVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
-	v.gotCall("BlockRead")
-	<-v.Gate
-	if v.Err != nil {
-		return 0, v.Err
-	}
-	if data, ok := v.Store[hash]; ok {
-		return writeTo.Write(data)
-	}
-	return 0, os.ErrNotExist
-}
-
-func (v *MockVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
-	v.gotCall("BlockWrite")
-	<-v.Gate
-	if v.Err != nil {
-		return v.Err
-	}
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
-	v.mutex.Lock()
-	defer v.mutex.Unlock()
-	v.Store[hash] = data
-	v.Timestamps[hash] = time.Now()
-	return nil
-}
-
-func (v *MockVolume) Touch(hash string) error {
-	return v.TouchWithDate(hash, time.Now())
-}
-
-func (v *MockVolume) TouchWithDate(hash string, t time.Time) error {
-	v.gotCall("Touch")
-	<-v.Gate
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
-	v.mutex.Lock()
-	defer v.mutex.Unlock()
-	if _, exists := v.Store[hash]; !exists {
-		return os.ErrNotExist
-	}
-	v.Timestamps[hash] = t
-	return nil
-}
-
-func (v *MockVolume) Mtime(loc string) (time.Time, error) {
-	v.gotCall("Mtime")
-	<-v.Gate
-	if v.Err != nil {
-		return time.Time{}, v.Err
-	}
-	if t, ok := v.Timestamps[loc]; !ok {
-		return time.Time{}, os.ErrNotExist
-	} else {
-		return t, nil
-	}
-}
-
-func (v *MockVolume) Index(ctx context.Context, prefix string, w io.Writer) error {
-	v.gotCall("IndexTo")
-	<-v.Gate
-	v.mutex.Lock()
-	defer v.mutex.Unlock()
-	for hash, data := range v.Store {
-		if err := ctx.Err(); err != nil {
-			return err
-		}
-		if !IsValidLocator(hash) || !strings.HasPrefix(hash, prefix) {
-			continue
-		}
-		_, err := fmt.Fprintf(w, "%s+%d %d\n", hash, len(data), v.Timestamps[hash].Unix())
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (v *MockVolume) Trash(hash string) error {
-	v.gotCall("Delete")
-	<-v.Gate
-	if v.volume.ReadOnly {
-		return errMethodNotAllowed
-	}
-	if _, ok := v.Store[hash]; !ok {
-		return os.ErrNotExist
-	}
-	if time.Since(v.Timestamps[hash]) < time.Duration(v.cluster.Collections.BlobSigningTTL) {
-		return nil
-	}
-	delete(v.Store, hash)
-	delete(v.Timestamps, hash)
-	return nil
-}
-
-func (v *MockVolume) DeviceID() string {
-	return "mock-device-id"
-}
-
-func (v *MockVolume) Untrash(hash string) error {
-	v.mutex.Lock()
-	defer v.mutex.Unlock()
-	_, ok := v.Store[hash]
-	if !ok {
-		return os.ErrNotExist
-	}
-	return nil
-}
-
-func (v *MockVolume) Status() *VolumeStatus {
-	var used uint64
-	for _, block := range v.Store {
-		used = used + uint64(len(block))
-	}
-	return &VolumeStatus{"/bogo", 123, 1000000 - used, used}
-}
-
-func (v *MockVolume) String() string {
-	return "[MockVolume]"
-}
-
-func (v *MockVolume) EmptyTrash() {
-}
-
-func (v *MockVolume) GetStorageClasses() []string {
-	return nil
-}
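
(Aside on the MockVolume removed above, not part of this commit: its Gate field used Go's "starting gate" channel idiom -- every operation first receives from the channel, so a test can hold all operations at a known point, release exactly one by sending a value, or release all of them by closing the channel. The standalone sketch below illustrates that idiom only; the names are illustrative and not taken from the repository.)

package main

import (
	"fmt"
	"sync"
)

// worker waits at the gate before doing its (stubbed) operation, so a
// test harness can hold every worker at a known point, then release
// them one at a time or all at once.
func worker(id int, gate <-chan struct{}, wg *sync.WaitGroup) {
	defer wg.Done()
	<-gate // blocks until a value is sent or the channel is closed
	fmt.Printf("worker %d proceeding\n", id)
}

func main() {
	gate := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go worker(i, gate, &wg)
	}
	gate <- struct{}{} // release exactly one worker
	close(gate)        // release all remaining workers
	wg.Wait()
}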

commit a99ab0bc3c6e8f75d8ca87053c300cf3c79ba1b4
Author: Tom Clegg <tom at curii.com>
Date:   Thu Feb 1 13:41:41 2024 -0500

    2960: Update tests, continued.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/keepstore_test.go b/services/keepstore/keepstore_test.go
index 7055d8e526..b7a094d0b8 100644
--- a/services/keepstore/keepstore_test.go
+++ b/services/keepstore/keepstore_test.go
@@ -8,16 +8,13 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
-	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
-	"net/http/httptest"
 	"os"
 	"sort"
 	"strings"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"git.arvados.org/arvados.git/lib/config"
@@ -34,7 +31,7 @@ var testServiceURL = func() arvados.URL {
 }()
 
 func authContext(token string) context.Context {
-	return auth.NewContext(context.TODO(), auth.Credentials{Tokens: []string{token}})
+	return auth.NewContext(context.TODO(), &auth.Credentials{Tokens: []string{token}})
 }
 
 func testCluster(t TB) *arvados.Cluster {
@@ -55,7 +52,9 @@ func testKeepstore(t TB, cluster *arvados.Cluster) (*keepstore, context.CancelFu
 	ctx, cancel := context.WithCancel(context.Background())
 	reg := prometheus.NewRegistry()
 	ks, err := newKeepstore(ctx, cluster, cluster.SystemRootToken, reg, testServiceURL)
-	t.Assert(err, IsNil)
+	if err != nil {
+		t.Fatal(err)
+	}
 	return ks, cancel
 }
 
@@ -68,8 +67,8 @@ type keepstoreSuite struct {
 func (s *keepstoreSuite) SetUpTest(c *C) {
 	s.cluster = testCluster(c)
 	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
-		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "stub"},
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "stub"},
 	}
 }
 
@@ -80,28 +79,28 @@ func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
 	ctx := authContext(arvadostest.ActiveTokenV2)
 
 	fooHash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
-	err := ks.mountsW[0].BlockWrite(fooHash, []byte("bar"))
+	err := ks.mountsW[0].BlockWrite(ctx, fooHash, []byte("bar"))
 	c.Assert(err, IsNil)
 
-	err = ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+	_, err = ks.BlockWrite(ctx, arvados.BlockWriteOptions{
 		Hash: fooHash,
 		Data: []byte("foo"),
 	})
 	c.Check(err, ErrorMatches, "checksum mismatch")
 
 	buf := bytes.NewBuffer(nil)
-	err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+	_, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
 		Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
 		WriteTo: buf,
 	})
 	c.Check(err, ErrorMatches, "checksum mismatch")
 	c.Check(buf.Len() < 3, Equals, true)
 
-	err = ks.mountsW[1].BlockWrite(fooHash, []byte("foo"))
+	err = ks.mountsW[1].BlockWrite(ctx, fooHash, []byte("foo"))
 	c.Assert(err, IsNil)
 
-	buf := bytes.NewBuffer(nil)
-	err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+	buf = bytes.NewBuffer(nil)
+	_, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
 		Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
 		WriteTo: buf,
 	})
@@ -153,17 +152,18 @@ func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 			StorageClasses: map[string]bool{"class2": true, "class3": true}},
 	}
 
+	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+
 	for _, trial := range []struct {
 		priority1 int // priority of class1, thus vol1
 		priority2 int // priority of class2
 		priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
-		get1      int // expected number of "get" ops on vol1
-		get2      int // expected number of "get" ops on vol2
+		expectLog string
 	}{
-		{100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
-		{100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
-		{66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
-		{66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
+		{100, 50, 50, "111 read acb\n"},              // class1 has higher priority => try vol1 first, no need to try vol2
+		{100, 100, 100, "111 read acb\n"},            // same priority, vol1 is first in rendezvous order => try vol1 first and succeed
+		{66, 99, 33, "222 read acb\n111 read acb\n"}, // class2 has higher priority => try vol2 first, then try vol1
+		{66, 33, 99, "222 read acb\n111 read acb\n"}, // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
 	} {
 		c.Logf("%+v", trial)
 
@@ -174,8 +174,11 @@ func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 		}
 		ks, cancel := testKeepstore(c, s.cluster)
 		defer cancel()
+		stubLog := &stubLog{}
+		for _, mnt := range ks.mounts {
+			mnt.volume.(*stubVolume).stubLog = stubLog
+		}
 		ctx := authContext(arvadostest.ActiveTokenV2)
-
 		resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
 			Hash:           fooHash,
 			Data:           []byte("foo"),
@@ -188,675 +191,102 @@ func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 		})
 		c.Assert(n, Equals, 3)
 		c.Assert(err, IsNil)
-		c.Check(s.keepstore.mounts["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), Equals, trial.get1)
-		c.Check(s.keepstore.mounts["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), Equals, trial.get2)
+		c.Check(stubLog.String(), Equals, trial.expectLog)
 	}
 }
 
-func (s *keepstoreSuite) TestPutWithNoWritableVolumes(c *C) {
-	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-111111111111111": {
-			Driver:         "mock",
-			Replication:    1,
-			ReadOnly:       true,
-			StorageClasses: map[string]bool{"class1": true}},
+func (s *keepstoreSuite) TestBlockWrite_NoWritableVolumes(c *C) {
+	for uuid, v := range s.cluster.Volumes {
+		v.ReadOnly = true
+		s.cluster.Volumes[uuid] = v
 	}
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-	resp := IssueRequest(s.router,
-		&RequestTester{
-			method:         "PUT",
-			uri:            "/" + TestHash,
-			requestBody:    TestBlock,
-			storageClasses: "class1",
-		})
-	c.Check(resp.Code, Equals, FullError.HTTPCode)
-	c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), Equals, 0)
+	ks, cancel := testKeepstore(c, s.cluster)
+	defer cancel()
+	for _, mnt := range ks.mounts {
+		mnt.volume.(*stubVolume).blockWrite = func(context.Context, string, []byte) error { c.Fatal("volume BlockWrite called"); return nil }
+	}
+	ctx := authContext(arvadostest.ActiveTokenV2)
+	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+
+	_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{Hash: fooHash, Data: []byte("foo")})
+	c.Check(err, NotNil)
+	c.Check(err.(interface{ HTTPStatus() int }).HTTPStatus(), Equals, http.StatusInsufficientStorage)
 }
 
-func (s *keepstoreSuite) TestConcurrentWritesToMultipleStorageClasses(c *C) {
+func (s *keepstoreSuite) TestBlockWrite_MultipleStorageClasses(c *C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
-			Driver:         "mock",
+			Driver:         "stub",
 			Replication:    1,
 			StorageClasses: map[string]bool{"class1": true}},
 		"zzzzz-nyw5e-121212121212121": {
-			Driver:         "mock",
+			Driver:         "stub",
 			Replication:    1,
 			StorageClasses: map[string]bool{"class1": true, "class2": true}},
 		"zzzzz-nyw5e-222222222222222": {
-			Driver:         "mock",
+			Driver:         "stub",
 			Replication:    1,
 			StorageClasses: map[string]bool{"class2": true}},
 	}
+	s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+		"class1": {},
+		"class2": {},
+		"class3": {},
+	}
+	ks, cancel := testKeepstore(c, s.cluster)
+	defer cancel()
+	stubLog := &stubLog{}
+	for _, mnt := range ks.mounts {
+		mnt.volume.(*stubVolume).stubLog = stubLog
+	}
+	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+	ctx := authContext(arvadostest.ActiveTokenV2)
+
+	rvz := ks.rendezvous(fooHash, ks.mountsW)
+	c.Assert(rvz[0].UUID[24:], Equals, "111")
+	c.Assert(rvz[1].UUID[24:], Equals, "121")
+	c.Assert(rvz[2].UUID[24:], Equals, "222")
 
 	for _, trial := range []struct {
-		setCounter uint32 // value to stuff vm.counter, to control offset
-		classes    string // desired classes
-		put111     int    // expected number of "put" ops on 11111... after 2x put reqs
-		put121     int    // expected number of "put" ops on 12121...
-		put222     int    // expected number of "put" ops on 22222...
-		cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
-		cmp121     int    // expected number of "compare" ops on 12121...
-		cmp222     int    // expected number of "compare" ops on 22222...
+		classes   string // desired classes
+		expectLog string
 	}{
-		{0, "class1",
-			1, 0, 0,
-			2, 1, 0}, // first put compares on all vols with class2; second put succeeds after checking 121
-		{0, "class2",
-			0, 1, 0,
-			0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
-		{0, "class1,class2",
-			1, 1, 0,
-			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
-		{1, "class1,class2",
-			0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
-			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
-		{0, "class1,class2,class404",
-			1, 1, 0,
-			2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
+		{"class1", "" +
+			"111 read acb\n" +
+			"121 read acb\n" +
+			"111 write acb\n" +
+			"111 read acb\n" +
+			"111 read acb\n"},
+		{"class2", ""},
+		{"class1,class2", ""},
+		{"class1,class2", ""},
+		{"class1,class2,class404", ""},
 	} {
 		c.Logf("%+v", trial)
-		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
-			"class1": {},
-			"class2": {},
-			"class3": {},
-		}
-		c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-		atomic.StoreUint32(&s.router.volmgr.counter, trial.setCounter)
+		stubLog.Reset()
 		for i := 0; i < 2; i++ {
-			IssueRequest(s.router,
-				&RequestTester{
-					method:         "PUT",
-					uri:            "/" + TestHash,
-					requestBody:    TestBlock,
-					storageClasses: trial.classes,
-				})
+			_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+				Hash:           fooHash,
+				Data:           []byte("foo"),
+				StorageClasses: strings.Split(trial.classes, ","),
+			})
+			c.Check(err, IsNil)
 		}
-		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), Equals, trial.put111)
-		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), Equals, trial.put121)
-		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), Equals, trial.put222)
-		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), Equals, trial.cmp111)
-		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), Equals, trial.cmp121)
-		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), Equals, trial.cmp222)
+		c.Check(stubLog.String(), Equals, trial.expectLog)
 	}
 }
 
-// Test TOUCH requests.
-func (s *keepstoreSuite) TestTouchHandler(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-	vols := s.router.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-	vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
-	afterPut := time.Now()
-	t, err := vols[0].Mtime(TestHash)
-	c.Assert(err, IsNil)
-	c.Assert(t.Before(afterPut), Equals, true)
-
-	ExpectStatusCode(c,
-		"touch with no credentials",
-		http.StatusUnauthorized,
-		IssueRequest(s.router, &RequestTester{
-			method: "TOUCH",
-			uri:    "/" + TestHash,
-		}))
-
-	ExpectStatusCode(c,
-		"touch with non-root credentials",
-		http.StatusUnauthorized,
-		IssueRequest(s.router, &RequestTester{
-			method:   "TOUCH",
-			uri:      "/" + TestHash,
-			apiToken: arvadostest.ActiveTokenV2,
-		}))
-
-	ExpectStatusCode(c,
-		"touch non-existent block",
-		http.StatusNotFound,
-		IssueRequest(s.router, &RequestTester{
-			method:   "TOUCH",
-			uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-			apiToken: s.cluster.SystemRootToken,
-		}))
-
-	beforeTouch := time.Now()
-	ExpectStatusCode(c,
-		"touch block",
-		http.StatusOK,
-		IssueRequest(s.router, &RequestTester{
-			method:   "TOUCH",
-			uri:      "/" + TestHash,
-			apiToken: s.cluster.SystemRootToken,
-		}))
-	t, err = vols[0].Mtime(TestHash)
-	c.Assert(err, IsNil)
-	c.Assert(t.After(beforeTouch), Equals, true)
+func (s *keepstoreSuite) TestBlockTouch(c *C) {
+	c.Fatal("todo")
 }
 
-// Test /index requests:
-//   - unauthenticated /index request
-//   - unauthenticated /index/prefix request
-//   - authenticated   /index request        | non-superuser
-//   - authenticated   /index/prefix request | non-superuser
-//   - authenticated   /index request        | superuser
-//   - authenticated   /index/prefix request | superuser
-//
-// The only /index requests that should succeed are those issued by the
-// superuser. They should pass regardless of the value of BlobSigning.
-func (s *keepstoreSuite) TestIndexHandler(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-
-	// Include multiple blocks on different volumes, and
-	// some metadata files (which should be omitted from index listings)
-	vols := s.router.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-	vols[1].Put(context.Background(), TestHash2, TestBlock2)
-	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
-	vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
-
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	unauthenticatedReq := &RequestTester{
-		method: "GET",
-		uri:    "/index",
-	}
-	authenticatedReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index",
-		apiToken: knownToken,
-	}
-	superuserReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index",
-		apiToken: s.cluster.SystemRootToken,
-	}
-	unauthPrefixReq := &RequestTester{
-		method: "GET",
-		uri:    "/index/" + TestHash[0:3],
-	}
-	authPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/" + TestHash[0:3],
-		apiToken: knownToken,
-	}
-	superuserPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/" + TestHash[0:3],
-		apiToken: s.cluster.SystemRootToken,
-	}
-	superuserNoSuchPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/abcd",
-		apiToken: s.cluster.SystemRootToken,
-	}
-	superuserInvalidPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/xyz",
-		apiToken: s.cluster.SystemRootToken,
-	}
-
-	// -------------------------------------------------------------
-	// Only the superuser should be allowed to issue /index requests.
-
-	// ---------------------------
-	// BlobSigning enabled
-	// This setting should not affect tests passing.
-	s.cluster.Collections.BlobSigning = true
-
-	// unauthenticated /index request
-	// => UnauthorizedError
-	response := IssueRequest(s.router, unauthenticatedReq)
-	ExpectStatusCode(c,
-		"permissions on, unauthenticated request",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// unauthenticated /index/prefix request
-	// => UnauthorizedError
-	response = IssueRequest(s.router, unauthPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, unauthenticated /index/prefix request",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// authenticated /index request, non-superuser
-	// => UnauthorizedError
-	response = IssueRequest(s.router, authenticatedReq)
-	ExpectStatusCode(c,
-		"permissions on, authenticated request, non-superuser",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// authenticated /index/prefix request, non-superuser
-	// => UnauthorizedError
-	response = IssueRequest(s.router, authPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, authenticated /index/prefix request, non-superuser",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// superuser /index request
-	// => OK
-	response = IssueRequest(s.router, superuserReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	// ----------------------------
-	// BlobSigning disabled
-	// Valid Request should still pass.
-	s.cluster.Collections.BlobSigning = false
-
-	// superuser /index request
-	// => OK
-	response = IssueRequest(s.router, superuserReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	expected := `^` + TestHash + `\+\d+ \d+\n` +
-		TestHash2 + `\+\d+ \d+\n\n$`
-	c.Check(response.Body.String(), Matches, expected, Commentf(
-		"permissions on, superuser request"))
-
-	// superuser /index/prefix request
-	// => OK
-	response = IssueRequest(s.router, superuserPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
-	c.Check(response.Body.String(), Matches, expected, Commentf(
-		"permissions on, superuser /index/prefix request"))
-
-	// superuser /index/{no-such-prefix} request
-	// => OK
-	response = IssueRequest(s.router, superuserNoSuchPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	if "\n" != response.Body.String() {
-		c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
-	}
-
-	// superuser /index/{invalid-prefix} request
-	// => StatusBadRequest
-	response = IssueRequest(s.router, superuserInvalidPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusBadRequest,
-		response)
+func (s *keepstoreSuite) TestIndex(c *C) {
+	c.Fatal("todo: entire index")
+	c.Fatal("todo: specified prefix")
 }
 
-// TestDeleteHandler
-//
-// Cases tested:
-//
-//	With no token and with a non-data-manager token:
-//	* Delete existing block
-//	  (test for 403 Forbidden, confirm block not deleted)
-//
-//	With data manager token:
-//
-//	* Delete existing block
-//	  (test for 200 OK, response counts, confirm block deleted)
-//
-//	* Delete nonexistent block
-//	  (test for 200 OK, response counts)
-//
-//	TODO(twp):
-//
-//	* Delete block on read-only and read-write volume
-//	  (test for 200 OK, response with copies_deleted=1,
-//	  copies_failed=1, confirm block deleted only on r/w volume)
-//
-//	* Delete block on read-only volume only
-//	  (test for 200 OK, response with copies_deleted=0, copies_failed=1,
-//	  confirm block not deleted)
-func (s *keepstoreSuite) TestDeleteHandler(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-
-	vols := s.router.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-
-	// Explicitly set the BlobSigningTTL to 0 for these
-	// tests, to ensure the MockVolume deletes the blocks
-	// even though they have just been created.
+func (s *keepstoreSuite) TestBlockTrash(c *C) {
 	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
-
-	var userToken = "NOT DATA MANAGER TOKEN"
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	s.cluster.Collections.BlobTrash = true
-
-	unauthReq := &RequestTester{
-		method: "DELETE",
-		uri:    "/" + TestHash,
-	}
-
-	userReq := &RequestTester{
-		method:   "DELETE",
-		uri:      "/" + TestHash,
-		apiToken: userToken,
-	}
-
-	superuserExistingBlockReq := &RequestTester{
-		method:   "DELETE",
-		uri:      "/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
-	}
-
-	superuserNonexistentBlockReq := &RequestTester{
-		method:   "DELETE",
-		uri:      "/" + TestHash2,
-		apiToken: s.cluster.SystemRootToken,
-	}
-
-	// Unauthenticated request returns PermissionError.
-	var response *httptest.ResponseRecorder
-	response = IssueRequest(s.router, unauthReq)
-	ExpectStatusCode(c,
-		"unauthenticated request",
-		PermissionError.HTTPCode,
-		response)
-
-	// Authenticated non-admin request returns PermissionError.
-	response = IssueRequest(s.router, userReq)
-	ExpectStatusCode(c,
-		"authenticated non-admin request",
-		PermissionError.HTTPCode,
-		response)
-
-	// Authenticated admin request for nonexistent block.
-	type deletecounter struct {
-		Deleted int `json:"copies_deleted"`
-		Failed  int `json:"copies_failed"`
-	}
-	var responseDc, expectedDc deletecounter
-
-	response = IssueRequest(s.router, superuserNonexistentBlockReq)
-	ExpectStatusCode(c,
-		"data manager request, nonexistent block",
-		http.StatusNotFound,
-		response)
-
-	// Authenticated admin request for existing block while BlobTrash is false.
-	s.cluster.Collections.BlobTrash = false
-	response = IssueRequest(s.router, superuserExistingBlockReq)
-	ExpectStatusCode(c,
-		"authenticated request, existing block, method disabled",
-		MethodDisabledError.HTTPCode,
-		response)
-	s.cluster.Collections.BlobTrash = true
-
-	// Authenticated admin request for existing block.
-	response = IssueRequest(s.router, superuserExistingBlockReq)
-	ExpectStatusCode(c,
-		"data manager request, existing block",
-		http.StatusOK,
-		response)
-	// Expect response {"copies_deleted":1,"copies_failed":0}
-	expectedDc = deletecounter{1, 0}
-	json.NewDecoder(response.Body).Decode(&responseDc)
-	if responseDc != expectedDc {
-		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
-			expectedDc, responseDc)
-	}
-	// Confirm the block has been deleted
-	buf := make([]byte, BlockSize)
-	_, err := vols[0].Get(context.Background(), TestHash, buf)
-	var blockDeleted = os.IsNotExist(err)
-	if !blockDeleted {
-		c.Error("superuserExistingBlockReq: block not deleted")
-	}
-
-	// A DELETE request on a block newer than BlobSigningTTL
-	// should return success but leave the block on the volume.
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
-
-	response = IssueRequest(s.router, superuserExistingBlockReq)
-	ExpectStatusCode(c,
-		"data manager request, existing block",
-		http.StatusOK,
-		response)
-	// Expect response {"copies_deleted":1,"copies_failed":0}
-	expectedDc = deletecounter{1, 0}
-	json.NewDecoder(response.Body).Decode(&responseDc)
-	if responseDc != expectedDc {
-		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
-			expectedDc, responseDc)
-	}
-	// Confirm the block has NOT been deleted.
-	_, err = vols[0].Get(context.Background(), TestHash, buf)
-	if err != nil {
-		c.Errorf("testing delete on new block: %s\n", err)
-	}
-}
-
-// TestPullHandler
-//
-// Test handling of the PUT /pull statement.
-//
-// Cases tested: syntactically valid and invalid pull lists, from the
-// data manager and from unprivileged users:
-//
-//  1. Valid pull list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  2. Invalid pull request from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  3. Valid pull request from the data manager
-//     (expected result: 200 OK with request body "Received 3 pull
-//     requests"
-//
-//  4. Invalid pull request from the data manager
-//     (expected result: 400 Bad Request)
-//
-// Test that in the end, the pull manager received a good pull list with
-// the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// pull list simultaneously.  Make sure that none of them return 400
-// Bad Request and that pullq.GetList() returns a valid list.
-func (s *keepstoreSuite) TestPullHandler(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-
-	// Replace the router's pullq -- which the worker goroutines
-	// started by setup() are now receiving from -- with a new
-	// one, so we can see what the handler sends to it.
-	pullq := NewWorkQueue()
-	s.router.Handler.(*router).pullq = pullq
-
-	var userToken = "USER TOKEN"
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	goodJSON := []byte(`[
-		{
-			"locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
-			"servers":[
-				"http://server1",
-				"http://server2"
-		 	]
-		},
-		{
-			"locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
-			"servers":[]
-		},
-		{
-			"locator":"cccccccccccccccccccccccccccccccc+12345",
-			"servers":["http://server1"]
-		}
-	]`)
-
-	badJSON := []byte(`{ "key":"I'm a little teapot" }`)
-
-	type pullTest struct {
-		name         string
-		req          RequestTester
-		responseCode int
-		responseBody string
-	}
-	var testcases = []pullTest{
-		{
-			"Valid pull list from an ordinary user",
-			RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Invalid pull request from an ordinary user",
-			RequestTester{"/pull", userToken, "PUT", badJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Valid pull request from the data manager",
-			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
-			http.StatusOK,
-			"Received 3 pull requests\n",
-		},
-		{
-			"Invalid pull request from the data manager",
-			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
-			http.StatusBadRequest,
-			"",
-		},
-	}
-
-	for _, tst := range testcases {
-		response := IssueRequest(s.router, &tst.req)
-		ExpectStatusCode(c, tst.name, tst.responseCode, response)
-		ExpectBody(c, tst.name, tst.responseBody, response)
-	}
-
-	// The Keep pull manager should have received one good list with 3
-	// requests on it.
-	for i := 0; i < 3; i++ {
-		var item interface{}
-		select {
-		case item = <-pullq.NextItem:
-		case <-time.After(time.Second):
-			c.Error("timed out")
-		}
-		if _, ok := item.(PullRequest); !ok {
-			c.Errorf("item %v could not be parsed as a PullRequest", item)
-		}
-	}
-
-	expectChannelEmpty(c, pullq.NextItem)
-}
-
-// TestTrashHandler
-//
-// Test cases:
-//
-// Cases tested: syntactically valid and invalid trash lists, from the
-// data manager and from unprivileged users:
-//
-//  1. Valid trash list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  2. Invalid trash list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  3. Valid trash list from the data manager
-//     (expected result: 200 OK with request body "Received 3 trash
-//     requests"
-//
-//  4. Invalid trash list from the data manager
-//     (expected result: 400 Bad Request)
-//
-// Test that in the end, the trash collector received a good list
-// trash list with the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// pull list simultaneously.  Make sure that none of them return 400
-// Bad Request and that replica.Dump() returns a valid list.
-func (s *keepstoreSuite) TestTrashHandler(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-	// Replace the router's trashq -- which the worker goroutines
-	// started by setup() are now receiving from -- with a new
-	// one, so we can see what the handler sends to it.
-	trashq := NewWorkQueue()
-	s.router.Handler.(*router).trashq = trashq
-
-	var userToken = "USER TOKEN"
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	goodJSON := []byte(`[
-		{
-			"locator":"block1",
-			"block_mtime":1409082153
-		},
-		{
-			"locator":"block2",
-			"block_mtime":1409082153
-		},
-		{
-			"locator":"block3",
-			"block_mtime":1409082153
-		}
-	]`)
-
-	badJSON := []byte(`I am not a valid JSON string`)
-
-	type trashTest struct {
-		name         string
-		req          RequestTester
-		responseCode int
-		responseBody string
-	}
-
-	var testcases = []trashTest{
-		{
-			"Valid trash list from an ordinary user",
-			RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Invalid trash list from an ordinary user",
-			RequestTester{"/trash", userToken, "PUT", badJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Valid trash list from the data manager",
-			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
-			http.StatusOK,
-			"Received 3 trash requests\n",
-		},
-		{
-			"Invalid trash list from the data manager",
-			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
-			http.StatusBadRequest,
-			"",
-		},
-	}
-
-	for _, tst := range testcases {
-		response := IssueRequest(s.router, &tst.req)
-		ExpectStatusCode(c, tst.name, tst.responseCode, response)
-		ExpectBody(c, tst.name, tst.responseBody, response)
-	}
-
-	// The trash collector should have received one good list with 3
-	// requests on it.
-	for i := 0; i < 3; i++ {
-		item := <-trashq.NextItem
-		if _, ok := item.(TrashRequest); !ok {
-			c.Errorf("item %v could not be parsed as a TrashRequest", item)
-		}
-	}
-
-	expectChannelEmpty(c, trashq.NextItem)
+	c.Fatal("todo: trash block")
+	c.Fatal("todo: trash nonexistent block => 404")
 }
 
 func (s *keepstoreSuite) TestPutNeedsOnlyOneBuffer(c *C) {
@@ -873,45 +303,8 @@ func (s *keepstoreSuite) TestBufferPoolLeak(c *C) {
 	c.Fatal("todo")
 }
 
-// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *keepstoreSuite) TestGetHandlerNoBufferLeak(c *C) {
-	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
-
-	vols := s.router.volmgr.AllWritable()
-	if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
-		c.Error(err)
-	}
-
-	ok := make(chan bool)
-	go func() {
-		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
-			// Unauthenticated request, unsigned locator
-			// => OK
-			unsignedLocator := "/" + TestHash
-			response := IssueRequest(s.router,
-				&RequestTester{
-					method: "GET",
-					uri:    unsignedLocator,
-				})
-			ExpectStatusCode(c,
-				"Unauthenticated request, unsigned locator", http.StatusOK, response)
-			ExpectBody(c,
-				"Unauthenticated request, unsigned locator",
-				string(TestBlock),
-				response)
-		}
-		ok <- true
-	}()
-	select {
-	case <-time.After(20 * time.Second):
-		// If the buffer pool leaks, the test goroutine hangs.
-		c.Fatal("test did not finish, assuming pool leaked")
-	case <-ok:
-	}
-}
-
 func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
+	c.Fatal("todo: volume with no specified classes implies 'default'")
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
@@ -1088,49 +481,195 @@ func (s *keepstoreSuite) TestBlockRead_VolumeError503(c *C) {
 func init() {
 	driver["stub"] = func(params newVolumeParams) (volume, error) {
 		v := &stubVolume{
-			params: params,
-			data:   make(map[string]stubData),
+			params:  params,
+			data:    make(map[string]stubData),
+			stubLog: &stubLog{},
 		}
-		v.BlockRead = v.blockRead
-		v.BlockWrite = v.blockWrite
-		v.DeviceID = v.deviceID
-		v.BlockTouch = v.blockTouch
-		v.BlockTrash = v.blockTrash
-		v.BlockUntrash = v.blockUntrash
-		v.Index = v.index
-		v.Mtime = v.mtime
-		v.EmptyTrash = v.emptyTrash
 		return v, nil
 	}
 }
 
+type stubLog struct {
+	sync.Mutex
+	bytes.Buffer
+}
+
+func (sl *stubLog) Printf(format string, args ...interface{}) {
+	if sl == nil {
+		return
+	}
+	sl.Lock()
+	defer sl.Unlock()
+	fmt.Fprintf(sl, format+"\n", args...)
+}
+
 type stubData struct {
 	mtime time.Time
 	data  []byte
+	trash time.Time
 }
 
 type stubVolume struct {
-	params newVolumeParams
-	data   map[string]stubData
-	mtx    sync.Mutex
-
-	BlockRead    func(ctx context.Context, hash string, writeTo io.Writer) (int, error)
-	BlockWrite   func(ctx context.Context, hash string, data []byte) error
-	DeviceID     func() string
-	BlockTouch   func(hash string) error
-	BlockTrash   func(hash string) error
-	BlockUntrash func(hash string) error
-	Index        func(ctx context.Context, prefix string, writeTo io.Writer) error
-	Mtime        func(hash string) (time.Time, error)
-	EmptyTrash   func()
+	params  newVolumeParams
+	data    map[string]stubData
+	stubLog *stubLog
+	mtx     sync.Mutex
+
+	// The following funcs enable tests to insert delays and
+	// failures. Each volume operation begins by calling the
+	// corresponding func (if non-nil). If the func returns an
+	// error, that error is returned to caller. Otherwise, the
+	// stub continues normally.
+	blockRead    func(ctx context.Context, hash string, writeTo io.Writer) (int, error)
+	blockWrite   func(ctx context.Context, hash string, data []byte) error
+	deviceID     func() string
+	blockTouch   func(hash string) error
+	blockTrash   func(hash string) error
+	blockUntrash func(hash string) error
+	index        func(ctx context.Context, prefix string, writeTo io.Writer) error
+	mtime        func(hash string) (time.Time, error)
+	emptyTrash   func()
+}
+
+func (v *stubVolume) log(op, hash string) {
+	// Note this intentionally crashes if the UUID or hash is
+	// shorter than expected -- if keepstore ever does that, tests
+	// should fail.
+	v.stubLog.Printf("%s %s %s", v.params.ConfigVolume.UUID[24:27], op, hash[:3])
+}
+
+func (v *stubVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
+	v.log("read", hash)
+	if v.blockRead != nil {
+		n, err := v.blockRead(ctx, hash, writeTo)
+		if err != nil {
+			return n, err
+		}
+	}
+	v.mtx.Lock()
+	ent, ok := v.data[hash]
+	v.mtx.Unlock()
+	if !ok {
+		return 0, os.ErrNotExist
+	}
+	return writeTo.Write(ent.data)
 }
 
-func (*stubVolume) blockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {}
-func (*stubVolume) blockWrite(ctx context.Context, hash string, data []byte) error             {}
-func (*stubVolume) deviceID() string                                                           {}
-func (*stubVolume) blockTouch(hash string) error                                               {}
-func (*stubVolume) blockTrash(hash string) error                                               {}
-func (*stubVolume) blockUntrash(hash string) error                                             {}
-func (*stubVolume) index(ctx context.Context, prefix string, writeTo io.Writer) error          {}
-func (*stubVolume) mtime(hash string) (time.Time, error)                                       {}
-func (*stubVolume) emptyTrash()                                                                {}
+func (v *stubVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
+	v.log("write", hash)
+	if v.blockWrite != nil {
+		if err := v.blockWrite(ctx, hash, data); err != nil {
+			return err
+		}
+	}
+	v.mtx.Lock()
+	defer v.mtx.Unlock()
+	v.data[hash] = stubData{
+		mtime: time.Now(),
+		data:  append([]byte(nil), data...),
+	}
+	return nil
+}
+
+func (v *stubVolume) DeviceID() string {
+	return fmt.Sprintf("%p", v)
+}
+
+func (v *stubVolume) BlockTouch(hash string) error {
+	v.log("touch", hash)
+	if v.blockTouch != nil {
+		if err := v.blockTouch(hash); err != nil {
+			return err
+		}
+	}
+	v.mtx.Lock()
+	defer v.mtx.Unlock()
+	ent, ok := v.data[hash]
+	if !ok {
+		return os.ErrNotExist
+	}
+	ent.mtime = time.Now()
+	v.data[hash] = ent
+	return nil
+}
+
+func (v *stubVolume) BlockTrash(hash string) error {
+	v.log("trash", hash)
+	if v.blockTrash != nil {
+		if err := v.blockTrash(hash); err != nil {
+			return err
+		}
+	}
+	v.mtx.Lock()
+	defer v.mtx.Unlock()
+	ent, ok := v.data[hash]
+	if !ok || !ent.trash.IsZero() {
+		return os.ErrNotExist
+	}
+	ent.trash = time.Now().Add(v.params.Cluster.Collections.BlobTrashLifetime.Duration())
+	v.data[hash] = ent
+	return nil
+}
+
+func (v *stubVolume) BlockUntrash(hash string) error {
+	v.log("untrash", hash)
+	if v.blockUntrash != nil {
+		if err := v.blockUntrash(hash); err != nil {
+			return err
+		}
+	}
+	v.mtx.Lock()
+	defer v.mtx.Unlock()
+	ent, ok := v.data[hash]
+	if !ok || ent.trash.IsZero() {
+		return os.ErrNotExist
+	}
+	ent.trash = time.Time{}
+	v.data[hash] = ent
+	return nil
+}
+
+func (v *stubVolume) Index(ctx context.Context, prefix string, writeTo io.Writer) error {
+	v.stubLog.Printf("%s index %s", v.params.ConfigVolume.UUID, prefix)
+	if v.index != nil {
+		if err := v.index(ctx, prefix, writeTo); err != nil {
+			return err
+		}
+	}
+	buf := &bytes.Buffer{}
+	v.mtx.Lock()
+	for hash, ent := range v.data {
+		if strings.HasPrefix(hash, prefix) {
+			fmt.Fprintf(buf, "%s+%d %d\n", hash, len(ent.data), ent.mtime.UnixNano())
+		}
+	}
+	v.mtx.Unlock()
+	_, err := io.Copy(writeTo, buf)
+	return err
+}
+
+func (v *stubVolume) Mtime(hash string) (time.Time, error) {
+	v.log("mtime", hash)
+	if v.mtime != nil {
+		if t, err := v.mtime(hash); err != nil {
+			return t, err
+		}
+	}
+	v.mtx.Lock()
+	defer v.mtx.Unlock()
+	ent, ok := v.data[hash]
+	if !ok {
+		return time.Time{}, os.ErrNotExist
+	}
+	return ent.mtime, nil
+}
+
+func (v *stubVolume) EmptyTrash() {
+	v.stubLog.Printf("%s emptytrash", v.params.ConfigVolume.UUID)
+	v.mtx.Lock()
+	defer v.mtx.Unlock()
+	for hash, ent := range v.data {
+		if !ent.trash.IsZero() && time.Now().After(ent.trash) {
+			delete(v.data, hash)
+		}
+	}
+}
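
(Aside, not part of this commit: the stubVolume above exposes two test mechanisms -- per-operation hook funcs such as blockRead/blockWrite that a test can set to inject failures or delays, and a shared stubLog that records each operation as "<uuid suffix> <op> <hash prefix>" in arrival order. A rough sketch of how a test might combine them follows; testKeepstore, authContext, stubVolume, stubLog and the hash come from the diffs above, while the test body itself is illustrative only and assumes the test file's usual imports (context, errors, strings, arvados, gocheck).)

// Sketch only: every volume refuses writes via the blockWrite hook,
// and the shared stubLog still records each attempted write.
func (s *keepstoreSuite) TestStubVolumeHooks_Sketch(c *C) {
	ks, cancel := testKeepstore(c, s.cluster)
	defer cancel()

	stubLog := &stubLog{}
	for _, mnt := range ks.mounts {
		vol := mnt.volume.(*stubVolume)
		vol.stubLog = stubLog
		vol.blockWrite = func(context.Context, string, []byte) error {
			return errors.New("injected write failure")
		}
	}

	ctx := authContext(arvadostest.ActiveTokenV2)
	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
	_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
		Hash: fooHash,
		Data: []byte("foo"),
	})
	c.Check(err, NotNil) // no volume accepted the write

	// Each attempt was logged before the hook rejected it.
	c.Check(strings.Contains(stubLog.String(), "write acb"), Equals, true)
}
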
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
index def19d315e..567b4b22ad 100644
--- a/services/keepstore/router_test.go
+++ b/services/keepstore/router_test.go
@@ -31,8 +31,16 @@ type routerSuite struct {
 func (s *routerSuite) SetUpTest(c *C) {
 	s.cluster = testCluster(c)
 	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
-		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
+		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"testclass1": true}},
+		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"testclass2": true}},
+	}
+	s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
+		"testclass1": arvados.StorageClassConfig{
+			Default: true,
+		},
+		"testclass2": arvados.StorageClassConfig{
+			Default: true,
+		},
 	}
 }
 
@@ -74,6 +82,43 @@ func (s *routerSuite) TestBlockRead_Token(c *C) {
 	c.Check(string(resp.Bytes()), Equals, "foo")
 }
 
+func (s *routerSuite) TestBlockWrite_Headers(c *C) {
+	router, cancel := testRouter(c, s.cluster)
+	defer cancel()
+
+	var req *http.Request
+	var resp *httptest.ResponseRecorder
+
+	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+
+	req = httptest.NewRequest("PUT", "http://example/"+fooHash, bytes.NewReader([]byte("foo")))
+	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+	req.Header.Set("X-Arvados-Replicas-Desired", "2")
+	resp = httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "2")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1; testclass2=1")
+
+	req = httptest.NewRequest("PUT", "http://example/"+fooHash, bytes.NewReader([]byte("foo")))
+	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+	req.Header.Set("X-Keep-Storage-Classes", "testclass1")
+	resp = httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass1=1")
+
+	req = httptest.NewRequest("PUT", "http://example/"+fooHash, bytes.NewReader([]byte("foo")))
+	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+	req.Header.Set("X-Keep-Storage-Classes", " , testclass2 , ")
+	resp = httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "testclass2=1")
+}
+
 func (s *routerSuite) TestBadRequest(c *C) {
 	router, cancel := testRouter(c, s.cluster)
 	defer cancel()
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index 7caae1391e..cee954def1 100644
--- a/services/keepstore/trash_worker.go
+++ b/services/keepstore/trash_worker.go
@@ -60,7 +60,7 @@ func (t *trasher) SetTrashList(newlist []trashListItem) {
 }
 
 func (t *trasher) runWorker() {
-	var mntsAllowTrash []*mnt
+	var mntsAllowTrash []*mount
 	for _, mnt := range t.keepstore.mounts {
 		if mnt.AllowTrash {
 			mntsAllowTrash = append(mntsAllowTrash, mnt)
diff --git a/services/keepstore/trash_worker_test.go b/services/keepstore/trash_worker_test.go
index 1c01051191..e3226aaea7 100644
--- a/services/keepstore/trash_worker_test.go
+++ b/services/keepstore/trash_worker_test.go
@@ -5,12 +5,6 @@
 package keepstore
 
 import (
-	"container/list"
-	"context"
-	"time"
-
-	"git.arvados.org/arvados.git/sdk/go/ctxlog"
-	"github.com/prometheus/client_golang/prometheus"
 	check "gopkg.in/check.v1"
 )
 
@@ -37,332 +31,15 @@ type TrashWorkerTestData struct {
 }
 
 func (s *routerSuite) TestTrashList_Clear(c *check.C) {
-	c.Fatal("todo")
+	c.Fatal("todo: update trash list")
+	c.Fatal("todo: clear trash list")
 }
 
-func (s *routerSuite) TestTrashList_Execute(c *check.C) {
+func (s *routerSuite) TestTrashList(c *check.C) {
 	c.Fatal("todo: trash nonexistent block")
 	c.Fatal("todo: trash existing block")
-	c.Fatal("todo: trash block on only one volume")
+	c.Fatal("todo: trash block on specified volume")
 	c.Fatal("todo: trash block on volume with AllowTrash=false")
-}
-
-func (s *routerSuite) TestTrashList_Clear(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: "5d41402abc4b2a76b9719d911017c592",
-		Block1:   []byte("hello"),
-
-		Locator2: "5d41402abc4b2a76b9719d911017c592",
-		Block2:   []byte("hello"),
-
-		CreateData: false,
-
-		DeleteLocator: "5d41402abc4b2a76b9719d911017c592",
-
-		ExpectLocator1: false,
-		ExpectLocator2: false,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block that exists on volume 1 of the keep servers. Expect
-// the second locator in volume 2 to be unaffected.
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume1(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash2,
-		Block2:   TestBlock2,
-
-		CreateData: true,
-
-		DeleteLocator: TestHash, // first locator
-
-		ExpectLocator1: false,
-		ExpectLocator2: true,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block that exists on volume 2 of the keep servers. Expect
-// the first locator in volume 1 to be unaffected.
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInVolume2(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash2,
-		Block2:   TestBlock2,
-
-		CreateData: true,
-
-		DeleteLocator: TestHash2, // locator 2
-
-		ExpectLocator1: true,
-		ExpectLocator2: false,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block with matching mtime for locator in both
-// volumes. Expect locator to be deleted from both volumes.
-func (s *HandlerSuite) TestTrashWorkerIntegration_LocatorInBothVolumes(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash,
-		Block2:   TestBlock,
-
-		CreateData: true,
-
-		DeleteLocator: TestHash,
-
-		ExpectLocator1: false,
-		ExpectLocator2: false,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Same locator with different Mtimes exists in both volumes. Delete
-// the second and expect the first to be still around.
-func (s *HandlerSuite) TestTrashWorkerIntegration_MtimeMatchesForLocator1ButNotForLocator2(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash,
-		Block2:   TestBlock,
-
-		CreateData:      true,
-		DifferentMtimes: true,
-
-		DeleteLocator: TestHash,
-
-		ExpectLocator1: true,
-		ExpectLocator2: false,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block that exists on both volumes with matching mtimes,
-// but specify a MountUUID in the request so it only gets deleted from
-// the first volume.
-func (s *HandlerSuite) TestTrashWorkerIntegration_SpecifyMountUUID(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash,
-		Block2:   TestBlock,
-
-		CreateData: true,
-
-		DeleteLocator:    TestHash,
-		SpecifyMountUUID: true,
-
-		ExpectLocator1: true,
-		ExpectLocator2: true,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Two different locators in volume 1. Delete one of them. Expect the
-// other unaffected.
-func (s *HandlerSuite) TestTrashWorkerIntegration_TwoDifferentLocatorsInVolume1(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash2,
-		Block2:   TestBlock2,
-
-		CreateData:      true,
-		CreateInVolume1: true,
-
-		DeleteLocator: TestHash, // locator 1
-
-		ExpectLocator1: false,
-		ExpectLocator2: true,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Allow default Trash Life time to be used. Thus, the newly created
-// block will not be deleted because its Mtime is within the trash
-// life time.
-func (s *HandlerSuite) TestTrashWorkerIntegration_SameLocatorInTwoVolumesWithDefaultTrashLifeTime(c *check.C) {
-	s.cluster.Collections.BlobTrash = true
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash2,
-		Block2:   TestBlock2,
-
-		CreateData:      true,
-		CreateInVolume1: true,
-
-		UseTrashLifeTime: true,
-
-		DeleteLocator: TestHash, // locator 1
-
-		// Since trash life time is in effect, block won't be deleted.
-		ExpectLocator1: true,
-		ExpectLocator2: true,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-// Delete a block with matching mtime for locator in both volumes, but
-// EnableDelete is false, so block won't be deleted.
-func (s *HandlerSuite) TestTrashWorkerIntegration_DisabledDelete(c *check.C) {
-	s.cluster.Collections.BlobTrash = false
-	testData := TrashWorkerTestData{
-		Locator1: TestHash,
-		Block1:   TestBlock,
-
-		Locator2: TestHash,
-		Block2:   TestBlock,
-
-		CreateData: true,
-
-		DeleteLocator: TestHash,
-
-		ExpectLocator1: true,
-		ExpectLocator2: true,
-	}
-	s.performTrashWorkerTest(c, testData)
-}
-
-func (s *HandlerSuite) performTrashWorkerTest(c *check.C, testData TrashWorkerTestData) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	// Replace the router's trashq -- which the worker goroutines
-	// started by setup() are now receiving from -- with a new
-	// one, so we can see what the handler sends to it.
-	trashq := NewWorkQueue()
-	s.handler.Handler.(*router).trashq = trashq
-
-	// Put test content
-	mounts := s.handler.volmgr.AllWritable()
-	if testData.CreateData {
-		mounts[0].Put(context.Background(), testData.Locator1, testData.Block1)
-		mounts[0].Put(context.Background(), testData.Locator1+".meta", []byte("metadata"))
-
-		if testData.CreateInVolume1 {
-			mounts[0].Put(context.Background(), testData.Locator2, testData.Block2)
-			mounts[0].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
-		} else {
-			mounts[1].Put(context.Background(), testData.Locator2, testData.Block2)
-			mounts[1].Put(context.Background(), testData.Locator2+".meta", []byte("metadata"))
-		}
-	}
-
-	oldBlockTime := time.Now().Add(-s.cluster.Collections.BlobSigningTTL.Duration() - time.Minute)
-
-	// Create TrashRequest for the test
-	trashRequest := TrashRequest{
-		Locator:    testData.DeleteLocator,
-		BlockMtime: oldBlockTime.UnixNano(),
-	}
-	if testData.SpecifyMountUUID {
-		trashRequest.MountUUID = s.handler.volmgr.Mounts()[0].UUID
-	}
-
-	// Run trash worker and put the trashRequest on trashq
-	trashList := list.New()
-	trashList.PushBack(trashRequest)
-
-	if !testData.UseTrashLifeTime {
-		// Trash worker would not delete block if its Mtime is
-		// within trash life time. Back-date the block to
-		// allow the deletion to succeed.
-		for _, mnt := range mounts {
-			mnt.Volume.(*MockVolume).Timestamps[testData.DeleteLocator] = oldBlockTime
-			if testData.DifferentMtimes {
-				oldBlockTime = oldBlockTime.Add(time.Second)
-			}
-		}
-	}
-	go RunTrashWorker(s.handler.volmgr, ctxlog.TestLogger(c), s.cluster, trashq)
-
-	// Install gate so all local operations block until we say go
-	gate := make(chan struct{})
-	for _, mnt := range mounts {
-		mnt.Volume.(*MockVolume).Gate = gate
-	}
-
-	assertStatusItem := func(k string, expect float64) {
-		if v := getStatusItem(s.handler, "TrashQueue", k); v != expect {
-			c.Errorf("Got %s %v, expected %v", k, v, expect)
-		}
-	}
-
-	assertStatusItem("InProgress", 0)
-	assertStatusItem("Queued", 0)
-
-	listLen := trashList.Len()
-	trashq.ReplaceQueue(trashList)
-
-	// Wait for worker to take request(s)
-	expectEqualWithin(c, time.Second, listLen, func() interface{} { return trashq.Status().InProgress })
-
-	// Ensure status.json also reports work is happening
-	assertStatusItem("InProgress", float64(1))
-	assertStatusItem("Queued", float64(listLen-1))
-
-	// Let worker proceed
-	close(gate)
-
-	// Wait for worker to finish
-	expectEqualWithin(c, time.Second, 0, func() interface{} { return trashq.Status().InProgress })
-
-	// Verify Locator1 to be un/deleted as expected
-	buf := make([]byte, BlockSize)
-	size, err := GetBlock(context.Background(), s.handler.volmgr, testData.Locator1, buf, nil)
-	if testData.ExpectLocator1 {
-		if size == 0 || err != nil {
-			c.Errorf("Expected Locator1 to be still present: %s", testData.Locator1)
-		}
-	} else {
-		if size > 0 || err == nil {
-			c.Errorf("Expected Locator1 to be deleted: %s", testData.Locator1)
-		}
-	}
-
-	// Verify Locator2 to be un/deleted as expected
-	if testData.Locator1 != testData.Locator2 {
-		size, err = GetBlock(context.Background(), s.handler.volmgr, testData.Locator2, buf, nil)
-		if testData.ExpectLocator2 {
-			if size == 0 || err != nil {
-				c.Errorf("Expected Locator2 to be still present: %s", testData.Locator2)
-			}
-		} else {
-			if size > 0 || err == nil {
-				c.Errorf("Expected Locator2 to be deleted: %s", testData.Locator2)
-			}
-		}
-	}
-
-	// The DifferentMtimes test puts the same locator in two
-	// different volumes, but only one copy has an Mtime matching
-	// the trash request.
-	if testData.DifferentMtimes {
-		locatorFoundIn := 0
-		for _, volume := range s.handler.volmgr.AllReadable() {
-			buf := make([]byte, BlockSize)
-			if _, err := volume.Get(context.Background(), testData.Locator1, buf); err == nil {
-				locatorFoundIn = locatorFoundIn + 1
-			}
-		}
-		c.Check(locatorFoundIn, check.Equals, 1)
-	}
+	c.Fatal("todo: trash block with unexpected timestamp")
+	c.Fatal("todo: trash block with recent timestamp")
 }
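
The two placeholders above stand in for the deleted TrashWorker integration tests. A minimal sketch of how the "recent timestamp" case might eventually look against the new keepstore API introduced later in this commit (hypothetical, not part of the diff; it assumes BlockTrash leaves a block alone when its mtime is newer than BlobSigningTTL, as the old worker did):

	func (s *keepstoreSuite) TestBlockTrash_RecentTimestamp(c *C) {
		ks, cancel := testKeepstore(c, s.cluster)
		defer cancel()
		ctx := authContext(arvadostest.ActiveTokenV2)

		fooHash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
		_, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{Hash: fooHash, Data: []byte("foo")})
		c.Assert(err, IsNil)

		// The block was just written, so (under the assumption above)
		// the trash request should leave it in place.
		err = ks.BlockTrash(ctx, fooHash)
		c.Check(err, NotNil)

		n, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
			Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
			WriteTo: io.Discard,
		})
		c.Check(err, IsNil)
		c.Check(n, Equals, 3)
	}
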
diff --git a/services/keepstore/unix_volume.go b/services/keepstore/unix_volume.go
index c04e53cad4..1fa5cfc0b2 100644
--- a/services/keepstore/unix_volume.go
+++ b/services/keepstore/unix_volume.go
@@ -329,19 +329,6 @@ func (v *UnixVolume) Status() *VolumeStatus {
 var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)
 var blockFileRe = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
-// IndexTo writes (to the given Writer) a list of blocks found on this
-// volume which begin with the specified prefix. If the prefix is an
-// empty string, IndexTo writes a complete list of blocks.
-//
-// Each block is given in the format
-//
-//	locator+size modification-time {newline}
-//
-// e.g.:
-//
-//	e4df392f86be161ca6ed3773a962b8f3+67108864 1388894303
-//	e4d41e6fd68460e0e3fc18cc746959d2+67108864 1377796043
-//	e4de7a2810f5554cd39b36d8ddb132ff+67108864 1388701136
 func (v *UnixVolume) Index(ctx context.Context, prefix string, w io.Writer) error {
 	rootdir, err := v.os.Open(v.Root)
 	if err != nil {

commit 3e19bd4f2c24ddeabb6a219c5995e0250d1caf7b
Author: Tom Clegg <tom at curii.com>
Date:   Wed Jan 31 14:22:25 2024 -0500

    2960: Update tests, continued.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 0eab7e0adb..3b33f93bd2 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -507,6 +507,23 @@ func (ks *keepstore) BlockTouch(ctx context.Context, hash string) error {
 	return errToCaller
 }
 
+func (ks *keepstore) BlockTrash(ctx context.Context, hash string) error {
+	var errToCaller error = os.ErrNotExist
+	for _, mnt := range ks.mounts {
+		if !mnt.AllowTrash {
+			continue
+		}
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		err := mnt.BlockTrash(hash)
+		if !os.IsNotExist(err) {
+			errToCaller = err
+		}
+	}
+	return errToCaller
+}
+
 func (ks *keepstore) Mounts() []*mount {
 	return ks.mountsR
 }
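
The new BlockTrash mirrors BlockTouch's per-mount error handling: errToCaller starts as os.ErrNotExist, and any result other than a not-found error (including a nil from a successful trash) replaces it, so "no trashable copy found" stays distinguishable from a real backend failure. A hypothetical caller sketch (the function name is illustrative, not from the diff):

	// Interpret the aggregate error returned by BlockTrash.
	func trashResult(ctx context.Context, ks *keepstore, hash string) string {
		switch err := ks.BlockTrash(ctx, hash); {
		case err == nil:
			return "trashed" // at least one trash-enabled mount removed the block
		case os.IsNotExist(err):
			return "not found" // no trash-enabled mount had the block
		default:
			return "error: " + err.Error() // a backend failed; caller may retry
		}
	}
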
diff --git a/services/keepstore/router_test.go b/services/keepstore/keepstore_test.go
similarity index 55%
copy from services/keepstore/router_test.go
copy to services/keepstore/keepstore_test.go
index 73cddcff26..7055d8e526 100644
--- a/services/keepstore/router_test.go
+++ b/services/keepstore/keepstore_test.go
@@ -7,28 +7,36 @@ package keepstore
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
 	"encoding/json"
 	"fmt"
+	"io"
 	"net/http"
 	"net/http/httptest"
 	"os"
 	"sort"
 	"strings"
+	"sync"
 	"sync/atomic"
 	"time"
 
 	"git.arvados.org/arvados.git/lib/config"
 	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/arvadostest"
+	"git.arvados.org/arvados.git/sdk/go/auth"
 	"git.arvados.org/arvados.git/sdk/go/ctxlog"
 	"github.com/prometheus/client_golang/prometheus"
-	check "gopkg.in/check.v1"
+	. "gopkg.in/check.v1"
 )
 
 var testServiceURL = func() arvados.URL {
 	return arvados.URL{Host: "localhost:12345", Scheme: "http"}
 }()
 
+func authContext(token string) context.Context {
+	return auth.NewContext(context.TODO(), auth.Credentials{Tokens: []string{token}})
+}
+
 func testCluster(t TB) *arvados.Cluster {
 	cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
 	if err != nil {
@@ -40,276 +48,100 @@ func testCluster(t TB) *arvados.Cluster {
 	}
 	cluster.SystemRootToken = arvadostest.SystemRootToken
 	cluster.ManagementToken = arvadostest.ManagementToken
-	cluster.Collections.BlobSigning = false
 	return cluster
 }
 
-var _ = check.Suite(&HandlerSuite{})
+func testKeepstore(t TB, cluster *arvados.Cluster) (*keepstore, context.CancelFunc) {
+	ctx, cancel := context.WithCancel(context.Background())
+	reg := prometheus.NewRegistry()
+	ks, err := newKeepstore(ctx, cluster, cluster.SystemRootToken, reg, testServiceURL)
+	t.Assert(err, IsNil)
+	return ks, cancel
+}
 
-type RouterSuite struct {
+var _ = Suite(&keepstoreSuite{})
+
+type keepstoreSuite struct {
 	cluster *arvados.Cluster
-	handler *router
 }
 
-func (s *RouterSuite) SetUpTest(c *check.C) {
+func (s *keepstoreSuite) SetUpTest(c *C) {
 	s.cluster = testCluster(c)
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
 	}
-	s.handler = newHandlerOrErrorHandler(context.Background(), s.cluster, s.cluster.SystemRootToken, prometheus.NewRegistry()).(*router)
-}
-
-// A RequestTester represents the parameters for an HTTP request to
-// be issued on behalf of a unit test.
-type RequestTester struct {
-	uri            string
-	apiToken       string
-	method         string
-	requestBody    []byte
-	storageClasses string
 }
 
-// Test GetBlockHandler on the following situations:
-//   - permissions off, unauthenticated request, unsigned locator
-//   - permissions on, authenticated request, signed locator
-//   - permissions on, authenticated request, unsigned locator
-//   - permissions on, unauthenticated request, signed locator
-//   - permissions on, authenticated request, expired locator
-//   - permissions on, authenticated request, signed locator, transient error from backend
-func (s *RouterSuite) TestGetHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	vols := s.handler.volmgr.AllWritable()
-	err := vols[0].Put(context.Background(), TestHash, TestBlock)
-	c.Check(err, check.IsNil)
-
-	// Create locators for testing.
-	// Turn on permission settings so we can generate signed locators.
-	s.cluster.Collections.BlobSigning = true
-	s.cluster.Collections.BlobSigningKey = knownKey
-	s.cluster.Collections.BlobSigningTTL.Set("5m")
-
-	var (
-		unsignedLocator  = "/" + TestHash
-		validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
-		expiredTimestamp = time.Now().Add(-time.Hour)
-		signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
-		expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
-	)
-
-	// -----------------
-	// Test unauthenticated request with permissions off.
-	s.cluster.Collections.BlobSigning = false
+func (s *keepstoreSuite) TestBlockRead_ChecksumMismatch(c *C) {
+	ks, cancel := testKeepstore(c, s.cluster)
+	defer cancel()
 
-	// Unauthenticated request, unsigned locator
-	// => OK
-	response := IssueRequest(s.handler,
-		&RequestTester{
-			method: "GET",
-			uri:    unsignedLocator,
-		})
-	ExpectStatusCode(c,
-		"Unauthenticated request, unsigned locator", http.StatusOK, response)
-	ExpectBody(c,
-		"Unauthenticated request, unsigned locator",
-		string(TestBlock),
-		response)
-
-	receivedLen := response.Header().Get("Content-Length")
-	expectedLen := fmt.Sprintf("%d", len(TestBlock))
-	if receivedLen != expectedLen {
-		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
-	}
-
-	// ----------------
-	// Permissions: on.
-	s.cluster.Collections.BlobSigning = true
-
-	// Authenticated request, signed locator
-	// => OK
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      signedLocator,
-		apiToken: knownToken,
-	})
-	ExpectStatusCode(c,
-		"Authenticated request, signed locator", http.StatusOK, response)
-	ExpectBody(c,
-		"Authenticated request, signed locator", string(TestBlock), response)
-
-	receivedLen = response.Header().Get("Content-Length")
-	expectedLen = fmt.Sprintf("%d", len(TestBlock))
-	if receivedLen != expectedLen {
-		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
-	}
+	ctx := authContext(arvadostest.ActiveTokenV2)
 
-	// Authenticated request, unsigned locator
-	// => PermissionError
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      unsignedLocator,
-		apiToken: knownToken,
-	})
-	ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
+	fooHash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+	err := ks.mountsW[0].BlockWrite(fooHash, []byte("bar"))
+	c.Assert(err, IsNil)
 
-	// Unauthenticated request, signed locator
-	// => PermissionError
-	response = IssueRequest(s.handler, &RequestTester{
-		method: "GET",
-		uri:    signedLocator,
+	err = ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+		Hash: fooHash,
+		Data: []byte("foo"),
 	})
-	ExpectStatusCode(c,
-		"Unauthenticated request, signed locator",
-		PermissionError.HTTPCode, response)
+	c.Check(err, ErrorMatches, "checksum mismatch")
 
-	// Authenticated request, expired locator
-	// => ExpiredError
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      expiredLocator,
-		apiToken: knownToken,
+	buf := bytes.NewBuffer(nil)
+	_, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+		Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
+		WriteTo: buf,
 	})
-	ExpectStatusCode(c,
-		"Authenticated request, expired locator",
-		ExpiredError.HTTPCode, response)
+	c.Check(err, ErrorMatches, "checksum mismatch")
+	c.Check(buf.Len() < 3, Equals, true)
 
-	// Authenticated request, signed locator
-	// => 503 Server busy (transient error)
+	err = ks.mountsW[1].BlockWrite(fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
 
-	// Set up the block owning volume to respond with errors
-	vols[0].Volume.(*MockVolume).Bad = true
-	vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      signedLocator,
-		apiToken: knownToken,
+	buf = bytes.NewBuffer(nil)
+	_, err = ks.BlockRead(ctx, arvados.BlockReadOptions{
+		Locator: ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3"),
+		WriteTo: buf,
 	})
-	// A transient error from one volume while the other doesn't find the block
-	// should make the service return a 503 so that clients can retry.
-	ExpectStatusCode(c,
-		"Volume backend busy",
-		503, response)
+	c.Check(err, ErrorMatches, "checksum mismatch")
+	c.Check(buf.Len() < 3, Equals, true)
 }
 
-// Test PutBlockHandler on the following situations:
-//   - no server key
-//   - with server key, authenticated request, unsigned locator
-//   - with server key, unauthenticated request, unsigned locator
-func (s *RouterSuite) TestPutHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	// --------------
-	// No server key.
-
-	s.cluster.Collections.BlobSigningKey = ""
-
-	// Unauthenticated request, no server key
-	// => OK (unsigned response)
-	unsignedLocator := "/" + TestHash
-	response := IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         unsignedLocator,
-			requestBody: TestBlock,
-		})
-
-	ExpectStatusCode(c,
-		"Unauthenticated request, no server key", http.StatusOK, response)
-	ExpectBody(c,
-		"Unauthenticated request, no server key",
-		TestHashPutResp, response)
-
-	// ------------------
-	// With a server key.
-
-	s.cluster.Collections.BlobSigningKey = knownKey
-	s.cluster.Collections.BlobSigningTTL.Set("5m")
-
-	// When a permission key is available, the locator returned
-	// from an authenticated PUT request will be signed.
-
-	// Authenticated PUT, signed locator
-	// => OK (signed response)
-	response = IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         unsignedLocator,
-			requestBody: TestBlock,
-			apiToken:    knownToken,
-		})
-
-	ExpectStatusCode(c,
-		"Authenticated PUT, signed locator, with server key",
-		http.StatusOK, response)
-	responseLocator := strings.TrimSpace(response.Body.String())
-	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
-		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
-			"response '%s' does not contain a valid signature",
-			responseLocator)
-	}
-
-	// Unauthenticated PUT, unsigned locator
-	// => OK
-	response = IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         unsignedLocator,
-			requestBody: TestBlock,
-		})
-
-	ExpectStatusCode(c,
-		"Unauthenticated PUT, unsigned locator, with server key",
-		http.StatusOK, response)
-	ExpectBody(c,
-		"Unauthenticated PUT, unsigned locator, with server key",
-		TestHashPutResp, response)
-}
-
-func (s *RouterSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
-	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestBlockReadWrite_SigningDisabled(c *C) {
+	s.cluster.Collections.BlobSigning = false
+	ks, cancel := testKeepstore(c, s.cluster)
+	defer cancel()
 
-	s.cluster.SystemRootToken = "fake-data-manager-token"
-	IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         "/" + TestHash,
-			requestBody: TestBlock,
-		})
+	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
 
-	s.cluster.Collections.BlobTrash = true
-	IssueRequest(s.handler,
-		&RequestTester{
-			method:      "DELETE",
-			uri:         "/" + TestHash,
-			requestBody: TestBlock,
-			apiToken:    s.cluster.SystemRootToken,
-		})
-	type expect struct {
-		volid     string
-		method    string
-		callcount int
-	}
-	for _, e := range []expect{
-		{"zzzzz-nyw5e-000000000000000", "Get", 0},
-		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
-		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
-		{"zzzzz-nyw5e-000000000000000", "Put", 0},
-		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
-		{"zzzzz-nyw5e-111111111111111", "Get", 0},
-		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
-		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
-		{"zzzzz-nyw5e-111111111111111", "Put", 1},
-		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
-	} {
-		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
-			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
+	resp, err := ks.BlockWrite(authContext("abcde"), arvados.BlockWriteOptions{
+		Hash: fooHash,
+		Data: []byte("foo"),
+	})
+	c.Assert(err, IsNil)
+	c.Check(resp.Locator, Equals, fooHash+"+3")
+	locUnsigned := resp.Locator
+	locSigned := ks.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	c.Assert(locSigned, Not(Equals), locUnsigned)
+
+	for _, locator := range []string{locUnsigned, locSigned} {
+		for _, token := range []string{"", "xyzzy", arvadostest.ActiveTokenV2} {
+			c.Logf("=== locator %q token %q", locator, token)
+			ctx := authContext(token)
+			buf := bytes.NewBuffer(nil)
+			_, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
+				Locator: locator,
+				WriteTo: buf,
+			})
+			c.Check(err, IsNil)
+			c.Check(string(buf.Bytes()), Equals, "foo")
 		}
 	}
 }
 
-func (s *RouterSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
+func (s *keepstoreSuite) TestBlockRead_OrderedByStorageClassPriority(c *C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
 			Driver:         "mock",
@@ -334,30 +166,34 @@ func (s *RouterSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
 		{66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
 	} {
 		c.Logf("%+v", trial)
+
 		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
 			"class1": {Priority: trial.priority1},
 			"class2": {Priority: trial.priority2},
 			"class3": {Priority: trial.priority3},
 		}
-		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-		IssueRequest(s.handler,
-			&RequestTester{
-				method:         "PUT",
-				uri:            "/" + TestHash,
-				requestBody:    TestBlock,
-				storageClasses: "class1",
-			})
-		IssueRequest(s.handler,
-			&RequestTester{
-				method: "GET",
-				uri:    "/" + TestHash,
-			})
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
+		ks, cancel := testKeepstore(c, s.cluster)
+		defer cancel()
+		ctx := authContext(arvadostest.ActiveTokenV2)
+		fooHash := fmt.Sprintf("%x", md5.Sum([]byte("foo")))
+
+		resp, err := ks.BlockWrite(ctx, arvados.BlockWriteOptions{
+			Hash:           fooHash,
+			Data:           []byte("foo"),
+			StorageClasses: []string{"class1"},
+		})
+		c.Assert(err, IsNil)
+		n, err := ks.BlockRead(ctx, arvados.BlockReadOptions{
+			Locator: resp.Locator,
+			WriteTo: io.Discard,
+		})
+		c.Assert(n, Equals, 3)
+		c.Assert(err, IsNil)
+		c.Check(ks.mounts["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), Equals, trial.get1)
+		c.Check(ks.mounts["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), Equals, trial.get2)
 	}
 }
 
-func (s *RouterSuite) TestPutWithNoWritableVolumes(c *check.C) {
+func (s *keepstoreSuite) TestPutWithNoWritableVolumes(c *C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
 			Driver:         "mock",
@@ -365,19 +201,19 @@ func (s *RouterSuite) TestPutWithNoWritableVolumes(c *check.C) {
 			ReadOnly:       true,
 			StorageClasses: map[string]bool{"class1": true}},
 	}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	resp := IssueRequest(s.handler,
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
+	resp := IssueRequest(s.router,
 		&RequestTester{
 			method:         "PUT",
 			uri:            "/" + TestHash,
 			requestBody:    TestBlock,
 			storageClasses: "class1",
 		})
-	c.Check(resp.Code, check.Equals, FullError.HTTPCode)
-	c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
+	c.Check(resp.Code, Equals, FullError.HTTPCode)
+	c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), Equals, 0)
 }
 
-func (s *RouterSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
+func (s *keepstoreSuite) TestConcurrentWritesToMultipleStorageClasses(c *C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
 			Driver:         "mock",
@@ -425,10 +261,10 @@ func (s *RouterSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
 			"class2": {},
 			"class3": {},
 		}
-		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-		atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
+		c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
+		atomic.StoreUint32(&s.router.volmgr.counter, trial.setCounter)
 		for i := 0; i < 2; i++ {
-			IssueRequest(s.handler,
+			IssueRequest(s.router,
 				&RequestTester{
 					method:         "PUT",
 					uri:            "/" + TestHash,
@@ -436,30 +272,30 @@ func (s *RouterSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
 					storageClasses: trial.classes,
 				})
 		}
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
+		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), Equals, trial.put111)
+		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), Equals, trial.put121)
+		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), Equals, trial.put222)
+		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), Equals, trial.cmp111)
+		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), Equals, trial.cmp121)
+		c.Check(s.router.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), Equals, trial.cmp222)
 	}
 }
 
 // Test TOUCH requests.
-func (s *RouterSuite) TestTouchHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	vols := s.handler.volmgr.AllWritable()
+func (s *keepstoreSuite) TestTouchHandler(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
+	vols := s.router.volmgr.AllWritable()
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 	vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
 	afterPut := time.Now()
 	t, err := vols[0].Mtime(TestHash)
-	c.Assert(err, check.IsNil)
-	c.Assert(t.Before(afterPut), check.Equals, true)
+	c.Assert(err, IsNil)
+	c.Assert(t.Before(afterPut), Equals, true)
 
 	ExpectStatusCode(c,
 		"touch with no credentials",
 		http.StatusUnauthorized,
-		IssueRequest(s.handler, &RequestTester{
+		IssueRequest(s.router, &RequestTester{
 			method: "TOUCH",
 			uri:    "/" + TestHash,
 		}))
@@ -467,7 +303,7 @@ func (s *RouterSuite) TestTouchHandler(c *check.C) {
 	ExpectStatusCode(c,
 		"touch with non-root credentials",
 		http.StatusUnauthorized,
-		IssueRequest(s.handler, &RequestTester{
+		IssueRequest(s.router, &RequestTester{
 			method:   "TOUCH",
 			uri:      "/" + TestHash,
 			apiToken: arvadostest.ActiveTokenV2,
@@ -476,7 +312,7 @@ func (s *RouterSuite) TestTouchHandler(c *check.C) {
 	ExpectStatusCode(c,
 		"touch non-existent block",
 		http.StatusNotFound,
-		IssueRequest(s.handler, &RequestTester{
+		IssueRequest(s.router, &RequestTester{
 			method:   "TOUCH",
 			uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
 			apiToken: s.cluster.SystemRootToken,
@@ -486,14 +322,14 @@ func (s *RouterSuite) TestTouchHandler(c *check.C) {
 	ExpectStatusCode(c,
 		"touch block",
 		http.StatusOK,
-		IssueRequest(s.handler, &RequestTester{
+		IssueRequest(s.router, &RequestTester{
 			method:   "TOUCH",
 			uri:      "/" + TestHash,
 			apiToken: s.cluster.SystemRootToken,
 		}))
 	t, err = vols[0].Mtime(TestHash)
-	c.Assert(err, check.IsNil)
-	c.Assert(t.After(beforeTouch), check.Equals, true)
+	c.Assert(err, IsNil)
+	c.Assert(t.After(beforeTouch), Equals, true)
 }
 
 // Test /index requests:
@@ -506,12 +342,12 @@ func (s *RouterSuite) TestTouchHandler(c *check.C) {
 //
 // The only /index requests that should succeed are those issued by the
 // superuser. They should pass regardless of the value of BlobSigning.
-func (s *RouterSuite) TestIndexHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestIndexHandler(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
 	// Include multiple blocks on different volumes, and
 	// some metadata files (which should be omitted from index listings)
-	vols := s.handler.volmgr.AllWritable()
+	vols := s.router.volmgr.AllWritable()
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 	vols[1].Put(context.Background(), TestHash2, TestBlock2)
 	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
@@ -568,7 +404,7 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	// unauthenticated /index request
 	// => UnauthorizedError
-	response := IssueRequest(s.handler, unauthenticatedReq)
+	response := IssueRequest(s.router, unauthenticatedReq)
 	ExpectStatusCode(c,
 		"permissions on, unauthenticated request",
 		UnauthorizedError.HTTPCode,
@@ -576,7 +412,7 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	// unauthenticated /index/prefix request
 	// => UnauthorizedError
-	response = IssueRequest(s.handler, unauthPrefixReq)
+	response = IssueRequest(s.router, unauthPrefixReq)
 	ExpectStatusCode(c,
 		"permissions on, unauthenticated /index/prefix request",
 		UnauthorizedError.HTTPCode,
@@ -584,7 +420,7 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	// authenticated /index request, non-superuser
 	// => UnauthorizedError
-	response = IssueRequest(s.handler, authenticatedReq)
+	response = IssueRequest(s.router, authenticatedReq)
 	ExpectStatusCode(c,
 		"permissions on, authenticated request, non-superuser",
 		UnauthorizedError.HTTPCode,
@@ -592,7 +428,7 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	// authenticated /index/prefix request, non-superuser
 	// => UnauthorizedError
-	response = IssueRequest(s.handler, authPrefixReq)
+	response = IssueRequest(s.router, authPrefixReq)
 	ExpectStatusCode(c,
 		"permissions on, authenticated /index/prefix request, non-superuser",
 		UnauthorizedError.HTTPCode,
@@ -600,7 +436,7 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	// superuser /index request
 	// => OK
-	response = IssueRequest(s.handler, superuserReq)
+	response = IssueRequest(s.router, superuserReq)
 	ExpectStatusCode(c,
 		"permissions on, superuser request",
 		http.StatusOK,
@@ -613,7 +449,7 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	// superuser /index request
 	// => OK
-	response = IssueRequest(s.handler, superuserReq)
+	response = IssueRequest(s.router, superuserReq)
 	ExpectStatusCode(c,
 		"permissions on, superuser request",
 		http.StatusOK,
@@ -621,24 +457,24 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	expected := `^` + TestHash + `\+\d+ \d+\n` +
 		TestHash2 + `\+\d+ \d+\n\n$`
-	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
+	c.Check(response.Body.String(), Matches, expected, Commentf(
 		"permissions on, superuser request"))
 
 	// superuser /index/prefix request
 	// => OK
-	response = IssueRequest(s.handler, superuserPrefixReq)
+	response = IssueRequest(s.router, superuserPrefixReq)
 	ExpectStatusCode(c,
 		"permissions on, superuser request",
 		http.StatusOK,
 		response)
 
 	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
-	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
+	c.Check(response.Body.String(), Matches, expected, Commentf(
 		"permissions on, superuser /index/prefix request"))
 
 	// superuser /index/{no-such-prefix} request
 	// => OK
-	response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
+	response = IssueRequest(s.router, superuserNoSuchPrefixReq)
 	ExpectStatusCode(c,
 		"permissions on, superuser request",
 		http.StatusOK,
@@ -650,7 +486,7 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 
 	// superuser /index/{invalid-prefix} request
 	// => StatusBadRequest
-	response = IssueRequest(s.handler, superuserInvalidPrefixReq)
+	response = IssueRequest(s.router, superuserInvalidPrefixReq)
 	ExpectStatusCode(c,
 		"permissions on, superuser request",
 		http.StatusBadRequest,
@@ -682,10 +518,10 @@ func (s *RouterSuite) TestIndexHandler(c *check.C) {
 //	* Delete block on read-only volume only
 //	  (test for 200 OK, response with copies_deleted=0, copies_failed=1,
 //	  confirm block not deleted)
-func (s *RouterSuite) TestDeleteHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestDeleteHandler(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
-	vols := s.handler.volmgr.AllWritable()
+	vols := s.router.volmgr.AllWritable()
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 
 	// Explicitly set the BlobSigningTTL to 0 for these
@@ -723,14 +559,14 @@ func (s *RouterSuite) TestDeleteHandler(c *check.C) {
 
 	// Unauthenticated request returns PermissionError.
 	var response *httptest.ResponseRecorder
-	response = IssueRequest(s.handler, unauthReq)
+	response = IssueRequest(s.router, unauthReq)
 	ExpectStatusCode(c,
 		"unauthenticated request",
 		PermissionError.HTTPCode,
 		response)
 
 	// Authenticated non-admin request returns PermissionError.
-	response = IssueRequest(s.handler, userReq)
+	response = IssueRequest(s.router, userReq)
 	ExpectStatusCode(c,
 		"authenticated non-admin request",
 		PermissionError.HTTPCode,
@@ -743,7 +579,7 @@ func (s *RouterSuite) TestDeleteHandler(c *check.C) {
 	}
 	var responseDc, expectedDc deletecounter
 
-	response = IssueRequest(s.handler, superuserNonexistentBlockReq)
+	response = IssueRequest(s.router, superuserNonexistentBlockReq)
 	ExpectStatusCode(c,
 		"data manager request, nonexistent block",
 		http.StatusNotFound,
@@ -751,7 +587,7 @@ func (s *RouterSuite) TestDeleteHandler(c *check.C) {
 
 	// Authenticated admin request for existing block while BlobTrash is false.
 	s.cluster.Collections.BlobTrash = false
-	response = IssueRequest(s.handler, superuserExistingBlockReq)
+	response = IssueRequest(s.router, superuserExistingBlockReq)
 	ExpectStatusCode(c,
 		"authenticated request, existing block, method disabled",
 		MethodDisabledError.HTTPCode,
@@ -759,7 +595,7 @@ func (s *RouterSuite) TestDeleteHandler(c *check.C) {
 	s.cluster.Collections.BlobTrash = true
 
 	// Authenticated admin request for existing block.
-	response = IssueRequest(s.handler, superuserExistingBlockReq)
+	response = IssueRequest(s.router, superuserExistingBlockReq)
 	ExpectStatusCode(c,
 		"data manager request, existing block",
 		http.StatusOK,
@@ -784,7 +620,7 @@ func (s *RouterSuite) TestDeleteHandler(c *check.C) {
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
 
-	response = IssueRequest(s.handler, superuserExistingBlockReq)
+	response = IssueRequest(s.router, superuserExistingBlockReq)
 	ExpectStatusCode(c,
 		"data manager request, existing block",
 		http.StatusOK,
@@ -829,14 +665,14 @@ func (s *RouterSuite) TestDeleteHandler(c *check.C) {
 // TODO(twp): test concurrency: launch 100 goroutines to update the
 // pull list simultaneously.  Make sure that none of them return 400
 // Bad Request and that pullq.GetList() returns a valid list.
-func (s *RouterSuite) TestPullHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestPullHandler(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
 	// Replace the router's pullq -- which the worker goroutines
 	// started by setup() are now receiving from -- with a new
 	// one, so we can see what the handler sends to it.
 	pullq := NewWorkQueue()
-	s.handler.Handler.(*router).pullq = pullq
+	s.router.Handler.(*router).pullq = pullq
 
 	var userToken = "USER TOKEN"
 	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
@@ -895,7 +731,7 @@ func (s *RouterSuite) TestPullHandler(c *check.C) {
 	}
 
 	for _, tst := range testcases {
-		response := IssueRequest(s.handler, &tst.req)
+		response := IssueRequest(s.router, &tst.req)
 		ExpectStatusCode(c, tst.name, tst.responseCode, response)
 		ExpectBody(c, tst.name, tst.responseBody, response)
 	}
@@ -943,13 +779,13 @@ func (s *RouterSuite) TestPullHandler(c *check.C) {
 // TODO(twp): test concurrency: launch 100 goroutines to update the
 // pull list simultaneously.  Make sure that none of them return 400
 // Bad Request and that replica.Dump() returns a valid list.
-func (s *RouterSuite) TestTrashHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestTrashHandler(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 	// Replace the router's trashq -- which the worker goroutines
 	// started by setup() are now receiving from -- with a new
 	// one, so we can see what the handler sends to it.
 	trashq := NewWorkQueue()
-	s.handler.Handler.(*router).trashq = trashq
+	s.router.Handler.(*router).trashq = trashq
 
 	var userToken = "USER TOKEN"
 	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
@@ -1006,7 +842,7 @@ func (s *RouterSuite) TestTrashHandler(c *check.C) {
 	}
 
 	for _, tst := range testcases {
-		response := IssueRequest(s.handler, &tst.req)
+		response := IssueRequest(s.router, &tst.req)
 		ExpectStatusCode(c, tst.name, tst.responseCode, response)
 		ExpectBody(c, tst.name, tst.responseBody, response)
 	}
@@ -1023,82 +859,9 @@ func (s *RouterSuite) TestTrashHandler(c *check.C) {
 	expectChannelEmpty(c, trashq.NextItem)
 }
 
-// ====================
-// Helper functions
-// ====================
-
-// IssueTestRequest executes an HTTP request described by rt, to a
-// REST router.  It returns the HTTP response to the request.
-func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
-	response := httptest.NewRecorder()
-	body := bytes.NewReader(rt.requestBody)
-	req, _ := http.NewRequest(rt.method, rt.uri, body)
-	if rt.apiToken != "" {
-		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
-	}
-	if rt.storageClasses != "" {
-		req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
-	}
-	handler.ServeHTTP(response, req)
-	return response
-}
-
-func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
-	response := httptest.NewRecorder()
-	body := bytes.NewReader(rt.requestBody)
-	req, _ := http.NewRequest(rt.method, rt.uri, body)
-	if rt.apiToken != "" {
-		req.Header.Set("Authorization", "Bearer "+rt.apiToken)
-	}
-	handler.ServeHTTP(response, req)
-	return response
-}
-
-// ExpectStatusCode checks whether a response has the specified status code,
-// and reports a test failure if not.
-func ExpectStatusCode(
-	c *check.C,
-	testname string,
-	expectedStatus int,
-	response *httptest.ResponseRecorder) {
-	c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
-}
-
-func ExpectBody(
-	c *check.C,
-	testname string,
-	expectedBody string,
-	response *httptest.ResponseRecorder) {
-	if expectedBody != "" && response.Body.String() != expectedBody {
-		c.Errorf("%s: expected response body '%s', got %+v",
-			testname, expectedBody, response)
-	}
-}
-
-// See #7121
-func (s *RouterSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	defer func(orig *bufferPool) {
-		bufs = orig
-	}(bufs)
-	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
-
-	ok := make(chan struct{})
-	go func() {
-		for i := 0; i < 2; i++ {
-			response := IssueRequest(s.handler,
-				&RequestTester{
-					method:      "PUT",
-					uri:         "/" + TestHash,
-					requestBody: TestBlock,
-				})
-			ExpectStatusCode(c,
-				"TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
-		}
-		ok <- struct{}{}
-	}()
-
+func (s *keepstoreSuite) TestPutNeedsOnlyOneBuffer(c *C) {
+	c.Fatal("todo")
+	ok := make(chan bool)
 	select {
 	case <-ok:
 	case <-time.After(time.Second):
@@ -1106,82 +869,16 @@ func (s *RouterSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
 	}
 }
 
-// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *RouterSuite) TestPutHandlerNoBufferleak(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	ok := make(chan bool)
-	go func() {
-		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
-			// Unauthenticated request, no server key
-			// => OK (unsigned response)
-			unsignedLocator := "/" + TestHash
-			response := IssueRequest(s.handler,
-				&RequestTester{
-					method:      "PUT",
-					uri:         unsignedLocator,
-					requestBody: TestBlock,
-				})
-			ExpectStatusCode(c,
-				"TestPutHandlerBufferleak", http.StatusOK, response)
-			ExpectBody(c,
-				"TestPutHandlerBufferleak",
-				TestHashPutResp, response)
-		}
-		ok <- true
-	}()
-	select {
-	case <-time.After(20 * time.Second):
-		// If the buffer pool leaks, the test goroutine hangs.
-		c.Fatal("test did not finish, assuming pool leaked")
-	case <-ok:
-	}
-}
-
-func (s *RouterSuite) TestGetHandlerClientDisconnect(c *check.C) {
-	s.cluster.Collections.BlobSigning = false
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	defer func(orig *bufferPool) {
-		bufs = orig
-	}(bufs)
-	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
-	defer bufs.Put(bufs.Get(BlockSize))
-
-	err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
-	c.Assert(err, check.IsNil)
-
-	resp := httptest.NewRecorder()
-	ok := make(chan struct{})
-	go func() {
-		ctx, cancel := context.WithCancel(context.Background())
-		req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
-		cancel()
-		s.handler.ServeHTTP(resp, req)
-		ok <- struct{}{}
-	}()
-
-	select {
-	case <-time.After(20 * time.Second):
-		c.Fatal("request took >20s, close notifier must be broken")
-	case <-ok:
-	}
-
-	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
-	for i, v := range s.handler.volmgr.AllWritable() {
-		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
-			c.Errorf("volume %d got %d calls, expected 0", i, calls)
-		}
-	}
+func (s *keepstoreSuite) TestBufferPoolLeak(c *C) {
+	c.Fatal("todo")
 }
 
 // Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
 // leak.
-func (s *RouterSuite) TestGetHandlerNoBufferLeak(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestGetHandlerNoBufferLeak(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
-	vols := s.handler.volmgr.AllWritable()
+	vols := s.router.volmgr.AllWritable()
 	if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
 		c.Error(err)
 	}
@@ -1192,7 +889,7 @@ func (s *RouterSuite) TestGetHandlerNoBufferLeak(c *check.C) {
 			// Unauthenticated request, unsigned locator
 			// => OK
 			unsignedLocator := "/" + TestHash
-			response := IssueRequest(s.handler,
+			response := IssueRequest(s.router,
 				&RequestTester{
 					method: "GET",
 					uri:    unsignedLocator,
@@ -1214,13 +911,13 @@ func (s *RouterSuite) TestGetHandlerNoBufferLeak(c *check.C) {
 	}
 }
 
-func (s *RouterSuite) TestPutStorageClasses(c *check.C) {
+func (s *keepstoreSuite) TestPutStorageClasses(c *C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
 		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
 	}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 	rt := RequestTester{
 		method:      "PUT",
 		uri:         "/" + TestHash,
@@ -1242,12 +939,12 @@ func (s *RouterSuite) TestPutStorageClasses(c *check.C) {
 	} {
 		c.Logf("success case %#v", trial)
 		rt.storageClasses = trial.ask
-		resp := IssueRequest(s.handler, &rt)
+		resp := IssueRequest(s.router, &rt)
 		if trial.expect == "" {
 			// any non-empty value is correct
-			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
+			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Not(Equals), "")
 		} else {
-			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
+			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), Equals, trial.expect)
 		}
 	}
 
@@ -1260,8 +957,8 @@ func (s *RouterSuite) TestPutStorageClasses(c *check.C) {
 	} {
 		c.Logf("failure case %#v", trial)
 		rt.storageClasses = trial.ask
-		resp := IssueRequest(s.handler, &rt)
-		c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
+		resp := IssueRequest(s.router, &rt)
+		c.Check(resp.Code, Equals, http.StatusServiceUnavailable)
 	}
 }
 
@@ -1271,24 +968,24 @@ func sortCommaSeparated(s string) string {
 	return strings.Join(slice, ", ")
 }
 
-func (s *RouterSuite) TestPutResponseHeader(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestPutResponseHeader(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
-	resp := IssueRequest(s.handler, &RequestTester{
+	resp := IssueRequest(s.router, &RequestTester{
 		method:      "PUT",
 		uri:         "/" + TestHash,
 		requestBody: TestBlock,
 	})
 	c.Logf("%#v", resp)
-	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
-	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
+	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), Equals, "1")
+	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), Equals, "default=1")
 }
 
-func (s *RouterSuite) TestUntrashHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *keepstoreSuite) TestUntrashHandler(c *C) {
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
 	// Set up Keep volumes
-	vols := s.handler.volmgr.AllWritable()
+	vols := s.router.volmgr.AllWritable()
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 
 	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
@@ -1298,7 +995,7 @@ func (s *RouterSuite) TestUntrashHandler(c *check.C) {
 		method: "PUT",
 		uri:    "/untrash/" + TestHash,
 	}
-	response := IssueRequest(s.handler, unauthenticatedReq)
+	response := IssueRequest(s.router, unauthenticatedReq)
 	ExpectStatusCode(c,
 		"Unauthenticated request",
 		UnauthorizedError.HTTPCode,
@@ -1311,7 +1008,7 @@ func (s *RouterSuite) TestUntrashHandler(c *check.C) {
 		apiToken: knownToken,
 	}
 
-	response = IssueRequest(s.handler, notDataManagerReq)
+	response = IssueRequest(s.router, notDataManagerReq)
 	ExpectStatusCode(c,
 		"Non-datamanager token",
 		UnauthorizedError.HTTPCode,
@@ -1323,7 +1020,7 @@ func (s *RouterSuite) TestUntrashHandler(c *check.C) {
 		uri:      "/untrash/thisisnotalocator",
 		apiToken: s.cluster.SystemRootToken,
 	}
-	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
+	response = IssueRequest(s.router, datamanagerWithBadHashReq)
 	ExpectStatusCode(c,
 		"Bad locator in untrash request",
 		http.StatusBadRequest,
@@ -1335,7 +1032,7 @@ func (s *RouterSuite) TestUntrashHandler(c *check.C) {
 		uri:      "/untrash/" + TestHash,
 		apiToken: s.cluster.SystemRootToken,
 	}
-	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
+	response = IssueRequest(s.router, datamanagerWrongMethodReq)
 	ExpectStatusCode(c,
 		"Only PUT method is supported for untrash",
 		http.StatusMethodNotAllowed,
@@ -1347,21 +1044,21 @@ func (s *RouterSuite) TestUntrashHandler(c *check.C) {
 		uri:      "/untrash/" + TestHash,
 		apiToken: s.cluster.SystemRootToken,
 	}
-	response = IssueRequest(s.handler, datamanagerReq)
+	response = IssueRequest(s.router, datamanagerReq)
 	ExpectStatusCode(c,
 		"",
 		http.StatusOK,
 		response)
-	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
+	c.Check(response.Body.String(), Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
 }
 
-func (s *RouterSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
+func (s *keepstoreSuite) TestUntrashHandlerWithNoWritableVolumes(c *C) {
 	// Change all volumes to read-only
 	for uuid, v := range s.cluster.Volumes {
 		v.ReadOnly = true
 		s.cluster.Volumes[uuid] = v
 	}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+	c.Assert(s.router.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
 	// datamanagerReq => StatusOK
 	datamanagerReq := &RequestTester{
@@ -1369,28 +1066,71 @@ func (s *RouterSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
 		uri:      "/untrash/" + TestHash,
 		apiToken: s.cluster.SystemRootToken,
 	}
-	response := IssueRequest(s.handler, datamanagerReq)
+	response := IssueRequest(s.router, datamanagerReq)
 	ExpectStatusCode(c,
 		"No writable volumes",
 		http.StatusNotFound,
 		response)
 }
 
-func (s *RouterSuite) TestHealthCheckPing(c *check.C) {
-	s.cluster.ManagementToken = arvadostest.ManagementToken
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	pingReq := &RequestTester{
-		method:   "GET",
-		uri:      "/_health/ping",
-		apiToken: arvadostest.ManagementToken,
-	}
-	response := IssueHealthCheckRequest(s.handler, pingReq)
-	ExpectStatusCode(c,
-		"",
-		http.StatusOK,
-		response)
-	want := `{"health":"OK"}`
-	if !strings.Contains(response.Body.String(), want) {
-		c.Errorf("expected response to include %s: got %s", want, response.Body.String())
+func (s *keepstoreSuite) TestBlockWrite_SkipReadonly(c *C) {
+	c.Fatal("todo")
+}
+
+func (s *keepstoreSuite) TestBlockTrash_SkipReadonly(c *C) {
+	c.Fatal("todo")
+}
+
+func (s *keepstoreSuite) TestBlockRead_VolumeError503(c *C) {
+	c.Fatal("todo: return 503")
+}
+
+func init() {
+	driver["stub"] = func(params newVolumeParams) (volume, error) {
+		v := &stubVolume{
+			params: params,
+			data:   make(map[string]stubData),
+		}
+		v.BlockRead = v.blockRead
+		v.BlockWrite = v.blockWrite
+		v.DeviceID = v.deviceID
+		v.BlockTouch = v.blockTouch
+		v.BlockTrash = v.blockTrash
+		v.BlockUntrash = v.blockUntrash
+		v.Index = v.index
+		v.Mtime = v.mtime
+		v.EmptyTrash = v.emptyTrash
+		return v, nil
 	}
 }
+
+type stubData struct {
+	mtime time.Time
+	data  []byte
+}
+
+type stubVolume struct {
+	params newVolumeParams
+	data   map[string]stubData
+	mtx    sync.Mutex
+
+	BlockRead    func(ctx context.Context, hash string, writeTo io.Writer) (int, error)
+	BlockWrite   func(ctx context.Context, hash string, data []byte) error
+	DeviceID     func() string
+	BlockTouch   func(hash string) error
+	BlockTrash   func(hash string) error
+	BlockUntrash func(hash string) error
+	Index        func(ctx context.Context, prefix string, writeTo io.Writer) error
+	Mtime        func(hash string) (time.Time, error)
+	EmptyTrash   func()
+}
+
+func (*stubVolume) blockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {}
+func (*stubVolume) blockWrite(ctx context.Context, hash string, data []byte) error             {}
+func (*stubVolume) deviceID() string                                                           {}
+func (*stubVolume) blockTouch(hash string) error                                               {}
+func (*stubVolume) blockTrash(hash string) error                                               {}
+func (*stubVolume) blockUntrash(hash string) error                                             {}
+func (*stubVolume) index(ctx context.Context, prefix string, writeTo io.Writer) error          {}
+func (*stubVolume) mtime(hash string) (time.Time, error)                                       {}
+func (*stubVolume) emptyTrash()                                                                {}
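
The stubVolume methods above are still empty placeholders at this point in the branch. A sketch of how the read/write pair might be filled in, using only the data map and mutex declared above (hypothetical, not part of this commit):

	func (v *stubVolume) blockWrite(ctx context.Context, hash string, data []byte) error {
		v.mtx.Lock()
		defer v.mtx.Unlock()
		// Copy the caller's buffer so later mutations don't affect stored data.
		v.data[hash] = stubData{mtime: time.Now(), data: append([]byte(nil), data...)}
		return nil
	}

	func (v *stubVolume) blockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
		v.mtx.Lock()
		ent, ok := v.data[hash]
		v.mtx.Unlock()
		if !ok {
			return 0, os.ErrNotExist
		}
		return writeTo.Write(ent.data)
	}
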
diff --git a/services/keepstore/mounts_test.go b/services/keepstore/mounts_test.go
index e8c248219f..69e31bfa41 100644
--- a/services/keepstore/mounts_test.go
+++ b/services/keepstore/mounts_test.go
@@ -15,18 +15,18 @@ import (
 	"git.arvados.org/arvados.git/sdk/go/ctxlog"
 	"git.arvados.org/arvados.git/sdk/go/httpserver"
 	"github.com/prometheus/client_golang/prometheus"
-	check "gopkg.in/check.v1"
+	. "gopkg.in/check.v1"
 )
 
-func (s *HandlerSuite) TestMounts(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *routerSuite) TestMounts(c *C) {
+	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), IsNil)
 
 	vols := s.handler.volmgr.AllWritable()
 	vols[0].Put(context.Background(), TestHash, TestBlock)
 	vols[1].Put(context.Background(), TestHash2, TestBlock2)
 
 	resp := s.call("GET", "/mounts", "", nil)
-	c.Check(resp.Code, check.Equals, http.StatusOK)
+	c.Check(resp.Code, Equals, http.StatusOK)
 	var mntList []struct {
 		UUID           string          `json:"uuid"`
 		DeviceID       string          `json:"device_id"`
@@ -36,67 +36,67 @@ func (s *HandlerSuite) TestMounts(c *check.C) {
 	}
 	c.Log(resp.Body.String())
 	err := json.Unmarshal(resp.Body.Bytes(), &mntList)
-	c.Assert(err, check.IsNil)
-	c.Assert(len(mntList), check.Equals, 2)
+	c.Assert(err, IsNil)
+	c.Assert(len(mntList), Equals, 2)
 	for _, m := range mntList {
-		c.Check(len(m.UUID), check.Equals, 27)
-		c.Check(m.UUID[:12], check.Equals, "zzzzz-nyw5e-")
-		c.Check(m.DeviceID, check.Equals, "mock-device-id")
-		c.Check(m.ReadOnly, check.Equals, false)
-		c.Check(m.Replication, check.Equals, 1)
-		c.Check(m.StorageClasses, check.DeepEquals, map[string]bool{"default": true})
+		c.Check(len(m.UUID), Equals, 27)
+		c.Check(m.UUID[:12], Equals, "zzzzz-nyw5e-")
+		c.Check(m.DeviceID, Equals, "mock-device-id")
+		c.Check(m.ReadOnly, Equals, false)
+		c.Check(m.Replication, Equals, 1)
+		c.Check(m.StorageClasses, DeepEquals, map[string]bool{"default": true})
 	}
-	c.Check(mntList[0].UUID, check.Not(check.Equals), mntList[1].UUID)
+	c.Check(mntList[0].UUID, Not(Equals), mntList[1].UUID)
 
 	// Bad auth
 	for _, tok := range []string{"", "xyzzy"} {
 		resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks", tok, nil)
-		c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
-		c.Check(resp.Body.String(), check.Equals, "Unauthorized\n")
+		c.Check(resp.Code, Equals, http.StatusUnauthorized)
+		c.Check(resp.Body.String(), Equals, "Unauthorized\n")
 	}
 
 	tok := arvadostest.SystemRootToken
 
 	// Nonexistent mount UUID
 	resp = s.call("GET", "/mounts/X/blocks", tok, nil)
-	c.Check(resp.Code, check.Equals, http.StatusNotFound)
-	c.Check(resp.Body.String(), check.Equals, "mount not found\n")
+	c.Check(resp.Code, Equals, http.StatusNotFound)
+	c.Check(resp.Body.String(), Equals, "mount not found\n")
 
 	// Complete index of first mount
 	resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks", tok, nil)
-	c.Check(resp.Code, check.Equals, http.StatusOK)
-	c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
 
 	// Partial index of first mount (one block matches prefix)
 	resp = s.call("GET", "/mounts/"+mntList[0].UUID+"/blocks?prefix="+TestHash[:2], tok, nil)
-	c.Check(resp.Code, check.Equals, http.StatusOK)
-	c.Check(resp.Body.String(), check.Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Matches, TestHash+`\+[0-9]+ [0-9]+\n\n`)
 
 	// Complete index of second mount (note trailing slash)
 	resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/", tok, nil)
-	c.Check(resp.Code, check.Equals, http.StatusOK)
-	c.Check(resp.Body.String(), check.Matches, TestHash2+`\+[0-9]+ [0-9]+\n\n`)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Matches, TestHash2+`\+[0-9]+ [0-9]+\n\n`)
 
 	// Partial index of second mount (no blocks match prefix)
 	resp = s.call("GET", "/mounts/"+mntList[1].UUID+"/blocks/?prefix="+TestHash[:2], tok, nil)
-	c.Check(resp.Code, check.Equals, http.StatusOK)
-	c.Check(resp.Body.String(), check.Equals, "\n")
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "\n")
 }
 
-func (s *HandlerSuite) TestMetrics(c *check.C) {
+func (s *routerSuite) TestMetrics(c *C) {
 	reg := prometheus.NewRegistry()
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", reg, testServiceURL), check.IsNil)
+	c.Assert(s.handler.setup(context.Background(), s.cluster, "", reg, testServiceURL), IsNil)
 	instrumented := httpserver.Instrument(reg, ctxlog.TestLogger(c), s.handler.Handler)
 	s.handler.Handler = instrumented.ServeAPI(s.cluster.ManagementToken, instrumented)
 
 	s.call("PUT", "/"+TestHash, "", TestBlock)
 	s.call("PUT", "/"+TestHash2, "", TestBlock2)
 	resp := s.call("GET", "/metrics.json", "", nil)
-	c.Check(resp.Code, check.Equals, http.StatusUnauthorized)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
 	resp = s.call("GET", "/metrics.json", "foobar", nil)
-	c.Check(resp.Code, check.Equals, http.StatusForbidden)
+	c.Check(resp.Code, Equals, http.StatusForbidden)
 	resp = s.call("GET", "/metrics.json", arvadostest.ManagementToken, nil)
-	c.Check(resp.Code, check.Equals, http.StatusOK)
+	c.Check(resp.Code, Equals, http.StatusOK)
 	var j []struct {
 		Name   string
 		Help   string
@@ -119,7 +119,7 @@ func (s *HandlerSuite) TestMetrics(c *check.C) {
 		names[g.Name] = true
 		for _, m := range g.Metric {
 			if len(m.Label) == 2 && m.Label[0].Name == "code" && m.Label[0].Value == "200" && m.Label[1].Name == "method" && m.Label[1].Value == "put" {
-				c.Check(m.Summary.SampleCount, check.Equals, "2")
+				c.Check(m.Summary.SampleCount, Equals, "2")
 				found[g.Name] = true
 			}
 		}
@@ -137,11 +137,11 @@ func (s *HandlerSuite) TestMetrics(c *check.C) {
 	}
 	for _, m := range metricsNames {
 		_, ok := names[m]
-		c.Check(ok, check.Equals, true, check.Commentf("checking metric %q", m))
+		c.Check(ok, Equals, true, Commentf("checking metric %q", m))
 	}
 }
 
-func (s *HandlerSuite) call(method, path, tok string, body []byte) *httptest.ResponseRecorder {
+func (s *routerSuite) call(method, path, tok string, body []byte) *httptest.ResponseRecorder {
 	resp := httptest.NewRecorder()
 	req, _ := http.NewRequest(method, path, bytes.NewReader(body))
 	if tok != "" {
diff --git a/services/keepstore/pull_worker_integration_test.go b/services/keepstore/pull_worker_integration_test.go
deleted file mode 100644
index b445f2f082..0000000000
--- a/services/keepstore/pull_worker_integration_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package keepstore
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"io"
-	"io/ioutil"
-	"strings"
-
-	"git.arvados.org/arvados.git/sdk/go/arvadostest"
-	"git.arvados.org/arvados.git/sdk/go/keepclient"
-	"github.com/prometheus/client_golang/prometheus"
-	check "gopkg.in/check.v1"
-)
-
-type PullWorkIntegrationTestData struct {
-	Name     string
-	Locator  string
-	Content  string
-	GetError string
-}
-
-func (s *HandlerSuite) setupPullWorkerIntegrationTest(c *check.C, testData PullWorkIntegrationTestData, wantData bool) pullListItem {
-	arvadostest.StartKeep(2, false)
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	// Put content if the test needs it
-	if wantData {
-		locator, _, err := s.handler.keepClient.PutB([]byte(testData.Content))
-		if err != nil {
-			c.Errorf("Error putting test data in setup for %s %s %v", testData.Content, locator, err)
-		}
-		if locator == "" {
-			c.Errorf("No locator found after putting test data")
-		}
-	}
-
-	return pullListItem{
-		Locator: testData.Locator,
-	}
-}
-
-// Do a get on a block that is not existing in any of the keep servers.
-// Expect "block not found" error.
-func (s *HandlerSuite) TestPullWorkerIntegration_GetNonExistingLocator(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	testData := PullWorkIntegrationTestData{
-		Name:     "TestPullWorkerIntegration_GetLocator",
-		Locator:  "5d41402abc4b2a76b9719d911017c592",
-		Content:  "hello",
-		GetError: "Block not found",
-	}
-
-	pullRequest := s.setupPullWorkerIntegrationTest(c, testData, false)
-	defer arvadostest.StopKeep(2)
-
-	s.performPullWorkerIntegrationTest(testData, pullRequest, c)
-}
-
-// Do a get on a block that exists on one of the keep servers.
-// The setup method will create this block before doing the get.
-func (s *HandlerSuite) TestPullWorkerIntegration_GetExistingLocator(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	testData := PullWorkIntegrationTestData{
-		Name:     "TestPullWorkerIntegration_GetLocator",
-		Locator:  "5d41402abc4b2a76b9719d911017c592",
-		Content:  "hello",
-		GetError: "",
-	}
-
-	pullRequest := s.setupPullWorkerIntegrationTest(c, testData, true)
-	defer arvadostest.StopKeep(2)
-
-	s.performPullWorkerIntegrationTest(testData, pullRequest, c)
-}
-
-// Perform the test.
-// The test directly invokes the "PullItemAndProcess" rather than
-// putting an item on the pullq so that the errors can be verified.
-func (s *HandlerSuite) performPullWorkerIntegrationTest(testData PullWorkIntegrationTestData, item pullListItem, c *check.C) {
-
-	// Override writePulledBlock to mock PutBlock functionality
-	defer func(orig func(*RRVolumeManager, Volume, []byte, string) error) { writePulledBlock = orig }(writePulledBlock)
-	writePulledBlock = func(_ *RRVolumeManager, _ Volume, content []byte, _ string) error {
-		c.Check(string(content), check.Equals, testData.Content)
-		return nil
-	}
-
-	// Override GetContent to mock keepclient Get functionality
-	defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
-		GetContent = orig
-	}(GetContent)
-	GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
-		if testData.GetError != "" {
-			return nil, 0, "", errors.New(testData.GetError)
-		}
-		rdr := ioutil.NopCloser(bytes.NewBufferString(testData.Content))
-		return rdr, int64(len(testData.Content)), "", nil
-	}
-
-	err := s.handler.pullItemAndProcess(item)
-
-	if len(testData.GetError) > 0 {
-		if (err == nil) || (!strings.Contains(err.Error(), testData.GetError)) {
-			c.Errorf("Got error %v, expected %v", err, testData.GetError)
-		}
-	} else {
-		if err != nil {
-			c.Errorf("Got error %v, expected nil", err)
-		}
-	}
-}
diff --git a/services/keepstore/pull_worker_test.go b/services/keepstore/pull_worker_test.go
index fa11d47508..1fae8261a2 100644
--- a/services/keepstore/pull_worker_test.go
+++ b/services/keepstore/pull_worker_test.go
@@ -5,311 +5,24 @@
 package keepstore
 
 import (
-	"bytes"
-	"context"
-	"errors"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"time"
-
-	"git.arvados.org/arvados.git/sdk/go/arvados"
-	"git.arvados.org/arvados.git/sdk/go/keepclient"
-	"github.com/prometheus/client_golang/prometheus"
 	. "gopkg.in/check.v1"
-	check "gopkg.in/check.v1"
 )
 
-var _ = Suite(&PullWorkerTestSuite{})
-
-type PullWorkerTestSuite struct {
-	cluster *arvados.Cluster
-	handler *router
-
-	testPullLists map[string]string
-	readContent   string
-	readError     error
-	putContent    []byte
-	putError      error
-}
-
-func (s *PullWorkerTestSuite) SetUpTest(c *C) {
-	s.cluster = testCluster(c)
-	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-000000000000000": {Driver: "mock"},
-		"zzzzz-nyw5e-111111111111111": {Driver: "mock"},
-	}
-	s.cluster.Collections.BlobReplicateConcurrency = 1
-
-	s.handler = &handler{}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
+func (s *routerSuite) TestPullList_Clear(c *C) {
+	router, cancel := testRouter(c, s.cluster)
+	defer cancel()
 
-	s.readContent = ""
-	s.readError = nil
-	s.putContent = []byte{}
-	s.putError = nil
-
-	// When a new pull request arrives, the old one will be overwritten.
-	// This behavior is verified using these two maps in the
-	// "TestPullWorkerPullList_with_two_items_latest_replacing_old"
-	s.testPullLists = make(map[string]string)
-}
-
-var firstPullList = []byte(`[
-		{
-			"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
-			"servers":[
-				"server_1",
-				"server_2"
-		 	]
-		},{
-			"locator":"37b51d194a7513e45b56f6524f2d51f2+3",
-			"servers":[
-				"server_3"
-		 	]
-		}
-	]`)
-
-var secondPullList = []byte(`[
-		{
-			"locator":"73feffa4b7f6bb68e44cf984c85f6e88+3",
-			"servers":[
-				"server_1",
-				"server_2"
-		 	]
-		}
-	]`)
-
-type PullWorkerTestData struct {
-	name         string
-	req          RequestTester
-	responseCode int
-	responseBody string
-	readContent  string
-	readError    bool
-	putError     bool
+	c.Fatal("todo")
 }
 
-// Ensure MountUUID in a pull list is correctly translated to a Volume
-// argument passed to writePulledBlock().
-func (s *PullWorkerTestSuite) TestSpecifyMountUUID(c *C) {
-	defer func(f func(*RRVolumeManager, Volume, []byte, string) error) {
-		writePulledBlock = f
-	}(writePulledBlock)
-	pullq := s.handler.Handler.(*router).pullq
-
-	for _, spec := range []struct {
-		sendUUID     string
-		expectVolume Volume
-	}{
-		{
-			sendUUID:     "",
-			expectVolume: nil,
-		},
-		{
-			sendUUID:     s.handler.volmgr.Mounts()[0].UUID,
-			expectVolume: s.handler.volmgr.Mounts()[0].Volume,
-		},
-	} {
-		writePulledBlock = func(_ *RRVolumeManager, v Volume, _ []byte, _ string) error {
-			c.Check(v, Equals, spec.expectVolume)
-			return nil
-		}
-
-		resp := IssueRequest(s.handler, &RequestTester{
-			uri:      "/pull",
-			apiToken: s.cluster.SystemRootToken,
-			method:   "PUT",
-			requestBody: []byte(`[{
-				"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
-				"servers":["server_1","server_2"],
-				"mount_uuid":"` + spec.sendUUID + `"}]`),
-		})
-		c.Assert(resp.Code, Equals, http.StatusOK)
-		expectEqualWithin(c, time.Second, 0, func() interface{} {
-			st := pullq.Status()
-			return st.InProgress + st.Queued
-		})
-	}
-}
-
-func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_two_locators(c *C) {
-	testData := PullWorkerTestData{
-		name:         "TestPullWorkerPullList_with_two_locators",
-		req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
-		responseCode: http.StatusOK,
-		responseBody: "Received 2 pull requests\n",
-		readContent:  "hello",
-		readError:    false,
-		putError:     false,
-	}
-
-	s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorkerPullList_with_one_locator(c *C) {
-	testData := PullWorkerTestData{
-		name:         "TestPullWorkerPullList_with_one_locator",
-		req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
-		responseCode: http.StatusOK,
-		responseBody: "Received 1 pull requests\n",
-		readContent:  "hola",
-		readError:    false,
-		putError:     false,
-	}
-
-	s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_one_locator(c *C) {
-	testData := PullWorkerTestData{
-		name:         "TestPullWorker_error_on_get_one_locator",
-		req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
-		responseCode: http.StatusOK,
-		responseBody: "Received 1 pull requests\n",
-		readContent:  "unused",
-		readError:    true,
-		putError:     false,
-	}
-
-	s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_get_two_locators(c *C) {
-	testData := PullWorkerTestData{
-		name:         "TestPullWorker_error_on_get_two_locators",
-		req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
-		responseCode: http.StatusOK,
-		responseBody: "Received 2 pull requests\n",
-		readContent:  "unused",
-		readError:    true,
-		putError:     false,
-	}
-
-	s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_one_locator(c *C) {
-	testData := PullWorkerTestData{
-		name:         "TestPullWorker_error_on_put_one_locator",
-		req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", secondPullList, ""},
-		responseCode: http.StatusOK,
-		responseBody: "Received 1 pull requests\n",
-		readContent:  "hello hello",
-		readError:    false,
-		putError:     true,
-	}
-
-	s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) TestPullWorker_error_on_put_two_locators(c *C) {
-	testData := PullWorkerTestData{
-		name:         "TestPullWorker_error_on_put_two_locators",
-		req:          RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", firstPullList, ""},
-		responseCode: http.StatusOK,
-		responseBody: "Received 2 pull requests\n",
-		readContent:  "hello again",
-		readError:    false,
-		putError:     true,
-	}
-
-	s.performTest(testData, c)
-}
-
-// In this case, the item will not be placed on pullq
-func (s *PullWorkerTestSuite) TestPullWorker_invalidToken(c *C) {
-	testData := PullWorkerTestData{
-		name:         "TestPullWorkerPullList_with_two_locators",
-		req:          RequestTester{"/pull", "invalidToken", "PUT", firstPullList, ""},
-		responseCode: http.StatusUnauthorized,
-		responseBody: "Unauthorized\n",
-		readContent:  "hello",
-		readError:    false,
-		putError:     false,
-	}
-
-	s.performTest(testData, c)
-}
-
-func (s *PullWorkerTestSuite) performTest(testData PullWorkerTestData, c *C) {
-	pullq := s.handler.Handler.(*router).pullq
-
-	s.testPullLists[testData.name] = testData.responseBody
-
-	processedPullLists := make(map[string]string)
-
-	// Override GetContent to mock keepclient Get functionality
-	defer func(orig func(string, *keepclient.KeepClient) (io.ReadCloser, int64, string, error)) {
-		GetContent = orig
-	}(GetContent)
-	GetContent = func(signedLocator string, keepClient *keepclient.KeepClient) (reader io.ReadCloser, contentLength int64, url string, err error) {
-		c.Assert(getStatusItem(s.handler, "PullQueue", "InProgress"), Equals, float64(1))
-		processedPullLists[testData.name] = testData.responseBody
-		if testData.readError {
-			err = errors.New("Error getting data")
-			s.readError = err
-			return
-		}
-		s.readContent = testData.readContent
-		reader = ioutil.NopCloser(bytes.NewBufferString(testData.readContent))
-		contentLength = int64(len(testData.readContent))
-		return
-	}
-
-	// Override writePulledBlock to mock PutBlock functionality
-	defer func(orig func(*RRVolumeManager, Volume, []byte, string) error) { writePulledBlock = orig }(writePulledBlock)
-	writePulledBlock = func(_ *RRVolumeManager, v Volume, content []byte, locator string) error {
-		if testData.putError {
-			s.putError = errors.New("Error putting data")
-			return s.putError
-		}
-		s.putContent = content
-		return nil
-	}
-
-	c.Check(getStatusItem(s.handler, "PullQueue", "InProgress"), Equals, float64(0))
-	c.Check(getStatusItem(s.handler, "PullQueue", "Queued"), Equals, float64(0))
-	c.Check(getStatusItem(s.handler, "Version"), Not(Equals), "")
-
-	response := IssueRequest(s.handler, &testData.req)
-	c.Assert(response.Code, Equals, testData.responseCode)
-	c.Assert(response.Body.String(), Equals, testData.responseBody)
-
-	expectEqualWithin(c, time.Second, 0, func() interface{} {
-		st := pullq.Status()
-		return st.InProgress + st.Queued
-	})
-
-	if testData.name == "TestPullWorkerPullList_with_two_items_latest_replacing_old" {
-		c.Assert(len(s.testPullLists), Equals, 2)
-		c.Assert(len(processedPullLists), Equals, 1)
-		c.Assert(s.testPullLists["Added_before_actual_test_item"], NotNil)
-		c.Assert(s.testPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
-		c.Assert(processedPullLists["TestPullWorkerPullList_with_two_items_latest_replacing_old"], NotNil)
-	} else {
-		if testData.responseCode == http.StatusOK {
-			c.Assert(len(s.testPullLists), Equals, 1)
-			c.Assert(len(processedPullLists), Equals, 1)
-			c.Assert(s.testPullLists[testData.name], NotNil)
-		} else {
-			c.Assert(len(s.testPullLists), Equals, 1)
-			c.Assert(len(processedPullLists), Equals, 0)
-		}
-	}
-
-	if testData.readError {
-		c.Assert(s.readError, NotNil)
-	} else if testData.responseCode == http.StatusOK {
-		c.Assert(s.readError, IsNil)
-		c.Assert(s.readContent, Equals, testData.readContent)
-		if testData.putError {
-			c.Assert(s.putError, NotNil)
-		} else {
-			c.Assert(s.putError, IsNil)
-			c.Assert(string(s.putContent), Equals, testData.readContent)
-		}
-	}
+func (s *routerSuite) TestPullList_Execute(c *C) {
+	router, cancel := testRouter(c, s.cluster)
+	defer cancel()
 
-	expectChannelEmpty(c, pullq.NextItem)
+	c.Fatal("todo: pull available block to unspecified volume")
+	c.Fatal("todo: pull available block to specified volume")
+	c.Fatal("todo: log error when block not found on remote")
+	c.Fatal("todo: log error connecting to remote")
+	c.Fatal("todo: log error writing block to local mount")
+	c.Fatal("todo: log error when destination mount does not exist")
 }
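
The two pull-list tests above are placeholders for now. The wire format they will exercise is the one used by the tests deleted above: a JSON array of items carrying a block locator, candidate source servers, and an optional mount_uuid selecting the destination volume. As a sketch under those assumptions (the URL, token, server name, and mount UUID below are placeholders, and the response wording is not taken from this commit), a /pull request could be issued like this:

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	// Pull-list item format as used by the deleted pull worker tests.
    	pullList := []byte(`[{
    		"locator":"acbd18db4cc2f85cedef654fccc4a4d8+3",
    		"servers":["http://remote-keep.example:25107"],
    		"mount_uuid":"zzzzz-nyw5e-000000000000000"}]`)

    	req, err := http.NewRequest("PUT", "http://keep0.example:25107/pull", bytes.NewReader(pullList))
    	if err != nil {
    		panic(err)
    	}
    	// Pull lists are admin-only; placeholder token shown here.
    	req.Header.Set("Authorization", "Bearer "+"placeholder-system-root-token")
    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(resp.Body)
    	fmt.Println(resp.Status, string(body))
    }
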
diff --git a/services/keepstore/router.go b/services/keepstore/router.go
index 2ad39716b9..f741610ca9 100644
--- a/services/keepstore/router.go
+++ b/services/keepstore/router.go
@@ -55,6 +55,8 @@ func newRouter(keepstore *keepstore, puller *puller, trasher *trasher) service.H
 	put.HandleFunc(`/untrash`+locatorPath, adminonly(rtr.handleUntrash))
 	touch := r.Methods("TOUCH").Subrouter()
 	touch.HandleFunc(locatorPath, adminonly(rtr.handleBlockTouch))
+	delete := r.Methods(http.MethodDelete).Subrouter()
+	delete.HandleFunc(locatorPath, adminonly(rtr.handleBlockTrash))
 	r.NotFoundHandler = http.HandlerFunc(rtr.handleBadRequest)
 	rtr.Handler = auth.LoadToken(r)
 	return rtr
@@ -122,10 +124,12 @@ func (rtr *router) handleBlockWrite(w http.ResponseWriter, req *http.Request) {
 
 func (rtr *router) handleBlockTouch(w http.ResponseWriter, req *http.Request) {
 	err := rtr.keepstore.BlockTouch(req.Context(), mux.Vars(req)["locator"])
-	if err != nil {
-		rtr.handleError(w, req, err)
-		return
-	}
+	rtr.handleError(w, req, err)
+}
+
+func (rtr *router) handleBlockTrash(w http.ResponseWriter, req *http.Request) {
+	err := rtr.keepstore.BlockTrash(req.Context(), mux.Vars(req)["locator"])
+	rtr.handleError(w, req, err)
 }
 
 func (rtr *router) handleMounts(w http.ResponseWriter, req *http.Request) {
@@ -186,10 +190,7 @@ func (rtr *router) handleTrashList(w http.ResponseWriter, req *http.Request) {
 
 func (rtr *router) handleUntrash(w http.ResponseWriter, req *http.Request) {
 	err := rtr.keepstore.BlockUntrash(req.Context(), mux.Vars(req)["locator"])
-	if err != nil {
-		rtr.handleError(w, req, err)
-		return
-	}
+	rtr.handleError(w, req, err)
 }
 
 func (rtr *router) handleBadRequest(w http.ResponseWriter, req *http.Request) {
@@ -201,7 +202,9 @@ func (rtr *router) handleError(w http.ResponseWriter, req *http.Request, err err
 		w.WriteHeader(499)
 		return
 	}
-	if os.IsNotExist(err) {
+	if err == nil {
+		return
+	} else if os.IsNotExist(err) {
 		w.WriteHeader(http.StatusNotFound)
 	} else if statusErr := interface{ HTTPStatus() int }(nil); errors.As(err, &statusErr) {
 		w.WriteHeader(statusErr.HTTPStatus())
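
With this change, handlers such as handleBlockTouch and handleBlockTrash pass their result to handleError unconditionally; handleError returns immediately on a nil error and otherwise chooses a status code, including any status carried by an error implementing HTTPStatus() int. Below is a minimal self-contained sketch of that error-to-status mapping; the statusError type and the 500 fallback are illustrative assumptions, not code from this diff:

    package main

    import (
    	"errors"
    	"fmt"
    	"net/http"
    )

    // statusError is a hypothetical error type that carries its own HTTP
    // status, satisfying the interface{ HTTPStatus() int } check above.
    type statusError struct {
    	msg  string
    	code int
    }

    func (e statusError) Error() string   { return e.msg }
    func (e statusError) HTTPStatus() int { return e.code }

    // statusFor mirrors the shape of the handleError logic: nil means the
    // handler already succeeded; otherwise prefer the error's own status.
    func statusFor(err error) int {
    	if err == nil {
    		return http.StatusOK
    	}
    	statusErr := interface{ HTTPStatus() int }(nil)
    	if errors.As(err, &statusErr) {
    		return statusErr.HTTPStatus()
    	}
    	return http.StatusInternalServerError // fallback is an assumption
    }

    func main() {
    	fmt.Println(statusFor(nil))                                                        // 200
    	fmt.Println(statusFor(statusError{"volume full", http.StatusInsufficientStorage})) // 507
    	fmt.Println(statusFor(errors.New("unexpected")))                                   // 500
    }
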
diff --git a/services/keepstore/router_test.go b/services/keepstore/router_test.go
index 73cddcff26..def19d315e 100644
--- a/services/keepstore/router_test.go
+++ b/services/keepstore/router_test.go
@@ -5,1392 +5,137 @@
 package keepstore
 
 import (
-	"bytes"
 	"context"
-	"encoding/json"
-	"fmt"
 	"net/http"
 	"net/http/httptest"
-	"os"
-	"sort"
 	"strings"
-	"sync/atomic"
-	"time"
 
-	"git.arvados.org/arvados.git/lib/config"
 	"git.arvados.org/arvados.git/sdk/go/arvados"
 	"git.arvados.org/arvados.git/sdk/go/arvadostest"
-	"git.arvados.org/arvados.git/sdk/go/ctxlog"
-	"github.com/prometheus/client_golang/prometheus"
-	check "gopkg.in/check.v1"
+	. "gopkg.in/check.v1"
 )
 
-var testServiceURL = func() arvados.URL {
-	return arvados.URL{Host: "localhost:12345", Scheme: "http"}
-}()
-
-func testCluster(t TB) *arvados.Cluster {
-	cfg, err := config.NewLoader(bytes.NewBufferString("Clusters: {zzzzz: {}}"), ctxlog.TestLogger(t)).Load()
-	if err != nil {
-		t.Fatal(err)
-	}
-	cluster, err := cfg.GetCluster("")
-	if err != nil {
-		t.Fatal(err)
-	}
-	cluster.SystemRootToken = arvadostest.SystemRootToken
-	cluster.ManagementToken = arvadostest.ManagementToken
-	cluster.Collections.BlobSigning = false
-	return cluster
+func testRouter(t TB, cluster *arvados.Cluster) (*router, context.CancelFunc) {
+	ks, cancel := testKeepstore(t, cluster)
+	puller := newPuller(ks, reg)
+	trasher := newTrasher(ks, reg)
+	return newRouter(ks, puller, trasher).(*router), cancel
 }
 
-var _ = check.Suite(&HandlerSuite{})
+var _ = Suite(&routerSuite{})
 
-type RouterSuite struct {
+type routerSuite struct {
 	cluster *arvados.Cluster
-	handler *router
 }
 
-func (s *RouterSuite) SetUpTest(c *check.C) {
+func (s *routerSuite) SetUpTest(c *C) {
 	s.cluster = testCluster(c)
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
 	}
-	s.handler = newHandlerOrErrorHandler(context.Background(), s.cluster, s.cluster.SystemRootToken, prometheus.NewRegistry()).(*router)
-}
-
-// A RequestTester represents the parameters for an HTTP request to
-// be issued on behalf of a unit test.
-type RequestTester struct {
-	uri            string
-	apiToken       string
-	method         string
-	requestBody    []byte
-	storageClasses string
 }
 
-// Test GetBlockHandler on the following situations:
-//   - permissions off, unauthenticated request, unsigned locator
-//   - permissions on, authenticated request, signed locator
-//   - permissions on, authenticated request, unsigned locator
-//   - permissions on, unauthenticated request, signed locator
-//   - permissions on, authenticated request, expired locator
-//   - permissions on, authenticated request, signed locator, transient error from backend
-func (s *RouterSuite) TestGetHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	vols := s.handler.volmgr.AllWritable()
-	err := vols[0].Put(context.Background(), TestHash, TestBlock)
-	c.Check(err, check.IsNil)
-
-	// Create locators for testing.
-	// Turn on permission settings so we can generate signed locators.
-	s.cluster.Collections.BlobSigning = true
-	s.cluster.Collections.BlobSigningKey = knownKey
-	s.cluster.Collections.BlobSigningTTL.Set("5m")
-
-	var (
-		unsignedLocator  = "/" + TestHash
-		validTimestamp   = time.Now().Add(s.cluster.Collections.BlobSigningTTL.Duration())
-		expiredTimestamp = time.Now().Add(-time.Hour)
-		signedLocator    = "/" + SignLocator(s.cluster, TestHash, knownToken, validTimestamp)
-		expiredLocator   = "/" + SignLocator(s.cluster, TestHash, knownToken, expiredTimestamp)
-	)
-
-	// -----------------
-	// Test unauthenticated request with permissions off.
-	s.cluster.Collections.BlobSigning = false
-
-	// Unauthenticated request, unsigned locator
-	// => OK
-	response := IssueRequest(s.handler,
-		&RequestTester{
-			method: "GET",
-			uri:    unsignedLocator,
-		})
-	ExpectStatusCode(c,
-		"Unauthenticated request, unsigned locator", http.StatusOK, response)
-	ExpectBody(c,
-		"Unauthenticated request, unsigned locator",
-		string(TestBlock),
-		response)
-
-	receivedLen := response.Header().Get("Content-Length")
-	expectedLen := fmt.Sprintf("%d", len(TestBlock))
-	if receivedLen != expectedLen {
-		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
-	}
-
-	// ----------------
-	// Permissions: on.
-	s.cluster.Collections.BlobSigning = true
-
-	// Authenticated request, signed locator
-	// => OK
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      signedLocator,
-		apiToken: knownToken,
-	})
-	ExpectStatusCode(c,
-		"Authenticated request, signed locator", http.StatusOK, response)
-	ExpectBody(c,
-		"Authenticated request, signed locator", string(TestBlock), response)
-
-	receivedLen = response.Header().Get("Content-Length")
-	expectedLen = fmt.Sprintf("%d", len(TestBlock))
-	if receivedLen != expectedLen {
-		c.Errorf("expected Content-Length %s, got %s", expectedLen, receivedLen)
-	}
-
-	// Authenticated request, unsigned locator
-	// => PermissionError
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      unsignedLocator,
-		apiToken: knownToken,
-	})
-	ExpectStatusCode(c, "unsigned locator", PermissionError.HTTPCode, response)
-
-	// Unauthenticated request, signed locator
-	// => PermissionError
-	response = IssueRequest(s.handler, &RequestTester{
-		method: "GET",
-		uri:    signedLocator,
-	})
-	ExpectStatusCode(c,
-		"Unauthenticated request, signed locator",
-		PermissionError.HTTPCode, response)
-
-	// Authenticated request, expired locator
-	// => ExpiredError
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      expiredLocator,
-		apiToken: knownToken,
-	})
-	ExpectStatusCode(c,
-		"Authenticated request, expired locator",
-		ExpiredError.HTTPCode, response)
-
-	// Authenticated request, signed locator
-	// => 503 Server busy (transient error)
-
-	// Set up the block owning volume to respond with errors
-	vols[0].Volume.(*MockVolume).Bad = true
-	vols[0].Volume.(*MockVolume).BadVolumeError = VolumeBusyError
-	response = IssueRequest(s.handler, &RequestTester{
-		method:   "GET",
-		uri:      signedLocator,
-		apiToken: knownToken,
-	})
-	// A transient error from one volume while the other doesn't find the block
-	// should make the service return a 503 so that clients can retry.
-	ExpectStatusCode(c,
-		"Volume backend busy",
-		503, response)
-}
-
-// Test PutBlockHandler on the following situations:
-//   - no server key
-//   - with server key, authenticated request, unsigned locator
-//   - with server key, unauthenticated request, unsigned locator
-func (s *RouterSuite) TestPutHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	// --------------
-	// No server key.
-
-	s.cluster.Collections.BlobSigningKey = ""
-
-	// Unauthenticated request, no server key
-	// => OK (unsigned response)
-	unsignedLocator := "/" + TestHash
-	response := IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         unsignedLocator,
-			requestBody: TestBlock,
-		})
-
-	ExpectStatusCode(c,
-		"Unauthenticated request, no server key", http.StatusOK, response)
-	ExpectBody(c,
-		"Unauthenticated request, no server key",
-		TestHashPutResp, response)
-
-	// ------------------
-	// With a server key.
-
-	s.cluster.Collections.BlobSigningKey = knownKey
-	s.cluster.Collections.BlobSigningTTL.Set("5m")
-
-	// When a permission key is available, the locator returned
-	// from an authenticated PUT request will be signed.
-
-	// Authenticated PUT, signed locator
-	// => OK (signed response)
-	response = IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         unsignedLocator,
-			requestBody: TestBlock,
-			apiToken:    knownToken,
-		})
-
-	ExpectStatusCode(c,
-		"Authenticated PUT, signed locator, with server key",
-		http.StatusOK, response)
-	responseLocator := strings.TrimSpace(response.Body.String())
-	if VerifySignature(s.cluster, responseLocator, knownToken) != nil {
-		c.Errorf("Authenticated PUT, signed locator, with server key:\n"+
-			"response '%s' does not contain a valid signature",
-			responseLocator)
-	}
-
-	// Unauthenticated PUT, unsigned locator
-	// => OK
-	response = IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         unsignedLocator,
-			requestBody: TestBlock,
-		})
-
-	ExpectStatusCode(c,
-		"Unauthenticated PUT, unsigned locator, with server key",
-		http.StatusOK, response)
-	ExpectBody(c,
-		"Unauthenticated PUT, unsigned locator, with server key",
-		TestHashPutResp, response)
-}
-
-func (s *RouterSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
-	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	s.cluster.SystemRootToken = "fake-data-manager-token"
-	IssueRequest(s.handler,
-		&RequestTester{
-			method:      "PUT",
-			uri:         "/" + TestHash,
-			requestBody: TestBlock,
-		})
-
-	s.cluster.Collections.BlobTrash = true
-	IssueRequest(s.handler,
-		&RequestTester{
-			method:      "DELETE",
-			uri:         "/" + TestHash,
-			requestBody: TestBlock,
-			apiToken:    s.cluster.SystemRootToken,
-		})
-	type expect struct {
-		volid     string
-		method    string
-		callcount int
-	}
-	for _, e := range []expect{
-		{"zzzzz-nyw5e-000000000000000", "Get", 0},
-		{"zzzzz-nyw5e-000000000000000", "Compare", 0},
-		{"zzzzz-nyw5e-000000000000000", "Touch", 0},
-		{"zzzzz-nyw5e-000000000000000", "Put", 0},
-		{"zzzzz-nyw5e-000000000000000", "Delete", 0},
-		{"zzzzz-nyw5e-111111111111111", "Get", 0},
-		{"zzzzz-nyw5e-111111111111111", "Compare", 1},
-		{"zzzzz-nyw5e-111111111111111", "Touch", 1},
-		{"zzzzz-nyw5e-111111111111111", "Put", 1},
-		{"zzzzz-nyw5e-111111111111111", "Delete", 1},
-	} {
-		if calls := s.handler.volmgr.mountMap[e.volid].Volume.(*MockVolume).CallCount(e.method); calls != e.callcount {
-			c.Errorf("Got %d %s() on vol %s, expect %d", calls, e.method, e.volid, e.callcount)
-		}
-	}
-}
-
-func (s *RouterSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
-	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-111111111111111": {
-			Driver:         "mock",
-			Replication:    1,
-			StorageClasses: map[string]bool{"class1": true}},
-		"zzzzz-nyw5e-222222222222222": {
-			Driver:         "mock",
-			Replication:    1,
-			StorageClasses: map[string]bool{"class2": true, "class3": true}},
-	}
-
-	for _, trial := range []struct {
-		priority1 int // priority of class1, thus vol1
-		priority2 int // priority of class2
-		priority3 int // priority of class3 (vol2 priority will be max(priority2, priority3))
-		get1      int // expected number of "get" ops on vol1
-		get2      int // expected number of "get" ops on vol2
-	}{
-		{100, 50, 50, 1, 0},   // class1 has higher priority => try vol1 first, no need to try vol2
-		{100, 100, 100, 1, 0}, // same priority, vol1 is first lexicographically => try vol1 first and succeed
-		{66, 99, 33, 1, 1},    // class2 has higher priority => try vol2 first, then try vol1
-		{66, 33, 99, 1, 1},    // class3 has highest priority => vol2 has highest => try vol2 first, then try vol1
-	} {
-		c.Logf("%+v", trial)
-		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
-			"class1": {Priority: trial.priority1},
-			"class2": {Priority: trial.priority2},
-			"class3": {Priority: trial.priority3},
-		}
-		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-		IssueRequest(s.handler,
-			&RequestTester{
-				method:         "PUT",
-				uri:            "/" + TestHash,
-				requestBody:    TestBlock,
-				storageClasses: "class1",
-			})
-		IssueRequest(s.handler,
-			&RequestTester{
-				method: "GET",
-				uri:    "/" + TestHash,
-			})
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get1)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Get"), check.Equals, trial.get2)
-	}
-}
-
-func (s *RouterSuite) TestPutWithNoWritableVolumes(c *check.C) {
-	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-111111111111111": {
-			Driver:         "mock",
-			Replication:    1,
-			ReadOnly:       true,
-			StorageClasses: map[string]bool{"class1": true}},
-	}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	resp := IssueRequest(s.handler,
-		&RequestTester{
-			method:         "PUT",
-			uri:            "/" + TestHash,
-			requestBody:    TestBlock,
-			storageClasses: "class1",
-		})
-	c.Check(resp.Code, check.Equals, FullError.HTTPCode)
-	c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
-}
-
-func (s *RouterSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
-	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-111111111111111": {
-			Driver:         "mock",
-			Replication:    1,
-			StorageClasses: map[string]bool{"class1": true}},
-		"zzzzz-nyw5e-121212121212121": {
-			Driver:         "mock",
-			Replication:    1,
-			StorageClasses: map[string]bool{"class1": true, "class2": true}},
-		"zzzzz-nyw5e-222222222222222": {
-			Driver:         "mock",
-			Replication:    1,
-			StorageClasses: map[string]bool{"class2": true}},
-	}
-
-	for _, trial := range []struct {
-		setCounter uint32 // value to stuff vm.counter, to control offset
-		classes    string // desired classes
-		put111     int    // expected number of "put" ops on 11111... after 2x put reqs
-		put121     int    // expected number of "put" ops on 12121...
-		put222     int    // expected number of "put" ops on 22222...
-		cmp111     int    // expected number of "compare" ops on 11111... after 2x put reqs
-		cmp121     int    // expected number of "compare" ops on 12121...
-		cmp222     int    // expected number of "compare" ops on 22222...
-	}{
-		{0, "class1",
-			1, 0, 0,
-			2, 1, 0}, // first put compares on all vols with class2; second put succeeds after checking 121
-		{0, "class2",
-			0, 1, 0,
-			0, 2, 1}, // first put compares on all vols with class2; second put succeeds after checking 121
-		{0, "class1,class2",
-			1, 1, 0,
-			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
-		{1, "class1,class2",
-			0, 1, 0, // vm.counter offset is 1 so the first volume attempted is 121
-			2, 2, 1}, // first put compares on all vols; second put succeeds after checking 111 and 121
-		{0, "class1,class2,class404",
-			1, 1, 0,
-			2, 2, 1}, // first put compares on all vols; second put doesn't compare on 222 because it already satisfied class2 on 121
+func (s *routerSuite) TestBlockRead_Token(c *C) {
+	router, cancel := testRouter(c, s.cluster)
+	defer cancel()
+
+	const fooHash = "acbd18db4cc2f85cedef654fccc4a4d8"
+
+	err := s.keepstore.mountsW[0].BlockWrite(context.Background(), fooHash, []byte("foo"))
+	c.Assert(err, IsNil)
+	locSigned := s.keepstore.signLocator(arvadostest.ActiveTokenV2, fooHash+"+3")
+	c.Assert(locSigned, Not(Equals), fooHash+"+3")
+
+	var req *http.Request
+	var resp *httptest.ResponseRecorder
+
+	// No token provided
+	req = httptest.NewRequest("GET", "http://example/"+locSigned, nil)
+	resp = httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
+	c.Check(resp.Body.String(), Matches, "no token provided")
+
+	// Different token => invalid signature
+	req = httptest.NewRequest("GET", "http://example/"+locSigned, nil)
+	req.Header.Set("Authorization", "Bearer badtoken")
+	resp = httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusBadRequest)
+	c.Check(resp.Body.String(), Matches, "invalid signature")
+
+	// Correct token
+	req = httptest.NewRequest("GET", "http://example/"+locSigned, nil)
+	req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
+	resp = httptest.NewRecorder()
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusOK)
+	c.Check(resp.Body.String(), Equals, "foo")
+}
+
+func (s *routerSuite) TestBadRequest(c *C) {
+	router, cancel := testRouter(c, s.cluster)
+	defer cancel()
+
+	for _, trial := range []string{
+		"GET /",
+		"GET /xyz",
+		"GET /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabcdefg",
+		"GET /untrash",
+		"GET /mounts/blocks/123",
+		"GET /trash",
+		"GET /pull",
+		"POST /",
+		"POST /aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+		"POST /trash",
+		"PROPFIND /",
+		"MAKE-COFFEE /",
 	} {
-		c.Logf("%+v", trial)
-		s.cluster.StorageClasses = map[string]arvados.StorageClassConfig{
-			"class1": {},
-			"class2": {},
-			"class3": {},
-		}
-		c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-		atomic.StoreUint32(&s.handler.volmgr.counter, trial.setCounter)
-		for i := 0; i < 2; i++ {
-			IssueRequest(s.handler,
-				&RequestTester{
-					method:         "PUT",
-					uri:            "/" + TestHash,
-					requestBody:    TestBlock,
-					storageClasses: trial.classes,
-				})
-		}
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put111)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put121)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Put"), check.Equals, trial.put222)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp111)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-121212121212121"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp121)
-		c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-222222222222222"].Volume.(*MockVolume).CallCount("Compare"), check.Equals, trial.cmp222)
-	}
-}
-
-// Test TOUCH requests.
-func (s *RouterSuite) TestTouchHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	vols := s.handler.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-	vols[0].Volume.(*MockVolume).TouchWithDate(TestHash, time.Now().Add(-time.Hour))
-	afterPut := time.Now()
-	t, err := vols[0].Mtime(TestHash)
-	c.Assert(err, check.IsNil)
-	c.Assert(t.Before(afterPut), check.Equals, true)
-
-	ExpectStatusCode(c,
-		"touch with no credentials",
-		http.StatusUnauthorized,
-		IssueRequest(s.handler, &RequestTester{
-			method: "TOUCH",
-			uri:    "/" + TestHash,
-		}))
-
-	ExpectStatusCode(c,
-		"touch with non-root credentials",
-		http.StatusUnauthorized,
-		IssueRequest(s.handler, &RequestTester{
-			method:   "TOUCH",
-			uri:      "/" + TestHash,
-			apiToken: arvadostest.ActiveTokenV2,
-		}))
-
-	ExpectStatusCode(c,
-		"touch non-existent block",
-		http.StatusNotFound,
-		IssueRequest(s.handler, &RequestTester{
-			method:   "TOUCH",
-			uri:      "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
-			apiToken: s.cluster.SystemRootToken,
-		}))
-
-	beforeTouch := time.Now()
-	ExpectStatusCode(c,
-		"touch block",
-		http.StatusOK,
-		IssueRequest(s.handler, &RequestTester{
-			method:   "TOUCH",
-			uri:      "/" + TestHash,
-			apiToken: s.cluster.SystemRootToken,
-		}))
-	t, err = vols[0].Mtime(TestHash)
-	c.Assert(err, check.IsNil)
-	c.Assert(t.After(beforeTouch), check.Equals, true)
-}
-
-// Test /index requests:
-//   - unauthenticated /index request
-//   - unauthenticated /index/prefix request
-//   - authenticated   /index request        | non-superuser
-//   - authenticated   /index/prefix request | non-superuser
-//   - authenticated   /index request        | superuser
-//   - authenticated   /index/prefix request | superuser
-//
-// The only /index requests that should succeed are those issued by the
-// superuser. They should pass regardless of the value of BlobSigning.
-func (s *RouterSuite) TestIndexHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	// Include multiple blocks on different volumes, and
-	// some metadata files (which should be omitted from index listings)
-	vols := s.handler.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-	vols[1].Put(context.Background(), TestHash2, TestBlock2)
-	vols[0].Put(context.Background(), TestHash+".meta", []byte("metadata"))
-	vols[1].Put(context.Background(), TestHash2+".meta", []byte("metadata"))
-
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	unauthenticatedReq := &RequestTester{
-		method: "GET",
-		uri:    "/index",
-	}
-	authenticatedReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index",
-		apiToken: knownToken,
-	}
-	superuserReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index",
-		apiToken: s.cluster.SystemRootToken,
-	}
-	unauthPrefixReq := &RequestTester{
-		method: "GET",
-		uri:    "/index/" + TestHash[0:3],
-	}
-	authPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/" + TestHash[0:3],
-		apiToken: knownToken,
-	}
-	superuserPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/" + TestHash[0:3],
-		apiToken: s.cluster.SystemRootToken,
-	}
-	superuserNoSuchPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/abcd",
-		apiToken: s.cluster.SystemRootToken,
-	}
-	superuserInvalidPrefixReq := &RequestTester{
-		method:   "GET",
-		uri:      "/index/xyz",
-		apiToken: s.cluster.SystemRootToken,
-	}
-
-	// -------------------------------------------------------------
-	// Only the superuser should be allowed to issue /index requests.
-
-	// ---------------------------
-	// BlobSigning enabled
-	// This setting should not affect tests passing.
-	s.cluster.Collections.BlobSigning = true
-
-	// unauthenticated /index request
-	// => UnauthorizedError
-	response := IssueRequest(s.handler, unauthenticatedReq)
-	ExpectStatusCode(c,
-		"permissions on, unauthenticated request",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// unauthenticated /index/prefix request
-	// => UnauthorizedError
-	response = IssueRequest(s.handler, unauthPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, unauthenticated /index/prefix request",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// authenticated /index request, non-superuser
-	// => UnauthorizedError
-	response = IssueRequest(s.handler, authenticatedReq)
-	ExpectStatusCode(c,
-		"permissions on, authenticated request, non-superuser",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// authenticated /index/prefix request, non-superuser
-	// => UnauthorizedError
-	response = IssueRequest(s.handler, authPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, authenticated /index/prefix request, non-superuser",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// superuser /index request
-	// => OK
-	response = IssueRequest(s.handler, superuserReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	// ----------------------------
-	// BlobSigning disabled
-	// Valid Request should still pass.
-	s.cluster.Collections.BlobSigning = false
-
-	// superuser /index request
-	// => OK
-	response = IssueRequest(s.handler, superuserReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	expected := `^` + TestHash + `\+\d+ \d+\n` +
-		TestHash2 + `\+\d+ \d+\n\n$`
-	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
-		"permissions on, superuser request"))
-
-	// superuser /index/prefix request
-	// => OK
-	response = IssueRequest(s.handler, superuserPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	expected = `^` + TestHash + `\+\d+ \d+\n\n$`
-	c.Check(response.Body.String(), check.Matches, expected, check.Commentf(
-		"permissions on, superuser /index/prefix request"))
-
-	// superuser /index/{no-such-prefix} request
-	// => OK
-	response = IssueRequest(s.handler, superuserNoSuchPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusOK,
-		response)
-
-	if "\n" != response.Body.String() {
-		c.Errorf("Expected empty response for %s. Found %s", superuserNoSuchPrefixReq.uri, response.Body.String())
-	}
-
-	// superuser /index/{invalid-prefix} request
-	// => StatusBadRequest
-	response = IssueRequest(s.handler, superuserInvalidPrefixReq)
-	ExpectStatusCode(c,
-		"permissions on, superuser request",
-		http.StatusBadRequest,
-		response)
-}
-
-// TestDeleteHandler
-//
-// Cases tested:
-//
-//	With no token and with a non-data-manager token:
-//	* Delete existing block
-//	  (test for 403 Forbidden, confirm block not deleted)
-//
-//	With data manager token:
-//
-//	* Delete existing block
-//	  (test for 200 OK, response counts, confirm block deleted)
-//
-//	* Delete nonexistent block
-//	  (test for 200 OK, response counts)
-//
-//	TODO(twp):
-//
-//	* Delete block on read-only and read-write volume
-//	  (test for 200 OK, response with copies_deleted=1,
-//	  copies_failed=1, confirm block deleted only on r/w volume)
-//
-//	* Delete block on read-only volume only
-//	  (test for 200 OK, response with copies_deleted=0, copies_failed=1,
-//	  confirm block not deleted)
-func (s *RouterSuite) TestDeleteHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	vols := s.handler.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-
-	// Explicitly set the BlobSigningTTL to 0 for these
-	// tests, to ensure the MockVolume deletes the blocks
-	// even though they have just been created.
-	s.cluster.Collections.BlobSigningTTL = arvados.Duration(0)
-
-	var userToken = "NOT DATA MANAGER TOKEN"
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	s.cluster.Collections.BlobTrash = true
-
-	unauthReq := &RequestTester{
-		method: "DELETE",
-		uri:    "/" + TestHash,
-	}
-
-	userReq := &RequestTester{
-		method:   "DELETE",
-		uri:      "/" + TestHash,
-		apiToken: userToken,
-	}
-
-	superuserExistingBlockReq := &RequestTester{
-		method:   "DELETE",
-		uri:      "/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
-	}
-
-	superuserNonexistentBlockReq := &RequestTester{
-		method:   "DELETE",
-		uri:      "/" + TestHash2,
-		apiToken: s.cluster.SystemRootToken,
-	}
-
-	// Unauthenticated request returns PermissionError.
-	var response *httptest.ResponseRecorder
-	response = IssueRequest(s.handler, unauthReq)
-	ExpectStatusCode(c,
-		"unauthenticated request",
-		PermissionError.HTTPCode,
-		response)
-
-	// Authenticated non-admin request returns PermissionError.
-	response = IssueRequest(s.handler, userReq)
-	ExpectStatusCode(c,
-		"authenticated non-admin request",
-		PermissionError.HTTPCode,
-		response)
-
-	// Authenticated admin request for nonexistent block.
-	type deletecounter struct {
-		Deleted int `json:"copies_deleted"`
-		Failed  int `json:"copies_failed"`
-	}
-	var responseDc, expectedDc deletecounter
-
-	response = IssueRequest(s.handler, superuserNonexistentBlockReq)
-	ExpectStatusCode(c,
-		"data manager request, nonexistent block",
-		http.StatusNotFound,
-		response)
-
-	// Authenticated admin request for existing block while BlobTrash is false.
-	s.cluster.Collections.BlobTrash = false
-	response = IssueRequest(s.handler, superuserExistingBlockReq)
-	ExpectStatusCode(c,
-		"authenticated request, existing block, method disabled",
-		MethodDisabledError.HTTPCode,
-		response)
-	s.cluster.Collections.BlobTrash = true
-
-	// Authenticated admin request for existing block.
-	response = IssueRequest(s.handler, superuserExistingBlockReq)
-	ExpectStatusCode(c,
-		"data manager request, existing block",
-		http.StatusOK,
-		response)
-	// Expect response {"copies_deleted":1,"copies_failed":0}
-	expectedDc = deletecounter{1, 0}
-	json.NewDecoder(response.Body).Decode(&responseDc)
-	if responseDc != expectedDc {
-		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
-			expectedDc, responseDc)
-	}
-	// Confirm the block has been deleted
-	buf := make([]byte, BlockSize)
-	_, err := vols[0].Get(context.Background(), TestHash, buf)
-	var blockDeleted = os.IsNotExist(err)
-	if !blockDeleted {
-		c.Error("superuserExistingBlockReq: block not deleted")
-	}
-
-	// A DELETE request on a block newer than BlobSigningTTL
-	// should return success but leave the block on the volume.
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-	s.cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour)
-
-	response = IssueRequest(s.handler, superuserExistingBlockReq)
-	ExpectStatusCode(c,
-		"data manager request, existing block",
-		http.StatusOK,
-		response)
-	// Expect response {"copies_deleted":1,"copies_failed":0}
-	expectedDc = deletecounter{1, 0}
-	json.NewDecoder(response.Body).Decode(&responseDc)
-	if responseDc != expectedDc {
-		c.Errorf("superuserExistingBlockReq\nexpected: %+v\nreceived: %+v",
-			expectedDc, responseDc)
-	}
-	// Confirm the block has NOT been deleted.
-	_, err = vols[0].Get(context.Background(), TestHash, buf)
-	if err != nil {
-		c.Errorf("testing delete on new block: %s\n", err)
-	}
-}
-
-// TestPullHandler
-//
-// Test handling of the PUT /pull statement.
-//
-// Cases tested: syntactically valid and invalid pull lists, from the
-// data manager and from unprivileged users:
-//
-//  1. Valid pull list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  2. Invalid pull request from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  3. Valid pull request from the data manager
-//     (expected result: 200 OK with request body "Received 3 pull
-//     requests"
-//
-//  4. Invalid pull request from the data manager
-//     (expected result: 400 Bad Request)
-//
-// Test that in the end, the pull manager received a good pull list with
-// the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// pull list simultaneously.  Make sure that none of them return 400
-// Bad Request and that pullq.GetList() returns a valid list.
-func (s *RouterSuite) TestPullHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	// Replace the router's pullq -- which the worker goroutines
-	// started by setup() are now receiving from -- with a new
-	// one, so we can see what the handler sends to it.
-	pullq := NewWorkQueue()
-	s.handler.Handler.(*router).pullq = pullq
-
-	var userToken = "USER TOKEN"
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	goodJSON := []byte(`[
-		{
-			"locator":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+12345",
-			"servers":[
-				"http://server1",
-				"http://server2"
-		 	]
-		},
-		{
-			"locator":"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+12345",
-			"servers":[]
-		},
-		{
-			"locator":"cccccccccccccccccccccccccccccccc+12345",
-			"servers":["http://server1"]
-		}
-	]`)
-
-	badJSON := []byte(`{ "key":"I'm a little teapot" }`)
-
-	type pullTest struct {
-		name         string
-		req          RequestTester
-		responseCode int
-		responseBody string
-	}
-	var testcases = []pullTest{
-		{
-			"Valid pull list from an ordinary user",
-			RequestTester{"/pull", userToken, "PUT", goodJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Invalid pull request from an ordinary user",
-			RequestTester{"/pull", userToken, "PUT", badJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Valid pull request from the data manager",
-			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
-			http.StatusOK,
-			"Received 3 pull requests\n",
-		},
-		{
-			"Invalid pull request from the data manager",
-			RequestTester{"/pull", s.cluster.SystemRootToken, "PUT", badJSON, ""},
-			http.StatusBadRequest,
-			"",
-		},
-	}
-
-	for _, tst := range testcases {
-		response := IssueRequest(s.handler, &tst.req)
-		ExpectStatusCode(c, tst.name, tst.responseCode, response)
-		ExpectBody(c, tst.name, tst.responseBody, response)
-	}
-
-	// The Keep pull manager should have received one good list with 3
-	// requests on it.
-	for i := 0; i < 3; i++ {
-		var item interface{}
-		select {
-		case item = <-pullq.NextItem:
-		case <-time.After(time.Second):
-			c.Error("timed out")
-		}
-		if _, ok := item.(PullRequest); !ok {
-			c.Errorf("item %v could not be parsed as a PullRequest", item)
-		}
-	}
-
-	expectChannelEmpty(c, pullq.NextItem)
-}
-
-// TestTrashHandler
-//
-// Test cases:
-//
-// Cases tested: syntactically valid and invalid trash lists, from the
-// data manager and from unprivileged users:
-//
-//  1. Valid trash list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  2. Invalid trash list from an ordinary user
-//     (expected result: 401 Unauthorized)
-//
-//  3. Valid trash list from the data manager
-//     (expected result: 200 OK with request body "Received 3 trash
-//     requests"
-//
-//  4. Invalid trash list from the data manager
-//     (expected result: 400 Bad Request)
-//
-// Test that in the end, the trash collector received a good list
-// trash list with the expected number of requests.
-//
-// TODO(twp): test concurrency: launch 100 goroutines to update the
-// pull list simultaneously.  Make sure that none of them return 400
-// Bad Request and that replica.Dump() returns a valid list.
-func (s *RouterSuite) TestTrashHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	// Replace the router's trashq -- which the worker goroutines
-	// started by setup() are now receiving from -- with a new
-	// one, so we can see what the handler sends to it.
-	trashq := NewWorkQueue()
-	s.handler.Handler.(*router).trashq = trashq
-
-	var userToken = "USER TOKEN"
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	goodJSON := []byte(`[
-		{
-			"locator":"block1",
-			"block_mtime":1409082153
-		},
-		{
-			"locator":"block2",
-			"block_mtime":1409082153
-		},
-		{
-			"locator":"block3",
-			"block_mtime":1409082153
-		}
-	]`)
-
-	badJSON := []byte(`I am not a valid JSON string`)
-
-	type trashTest struct {
-		name         string
-		req          RequestTester
-		responseCode int
-		responseBody string
-	}
-
-	var testcases = []trashTest{
-		{
-			"Valid trash list from an ordinary user",
-			RequestTester{"/trash", userToken, "PUT", goodJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Invalid trash list from an ordinary user",
-			RequestTester{"/trash", userToken, "PUT", badJSON, ""},
-			http.StatusUnauthorized,
-			"Unauthorized\n",
-		},
-		{
-			"Valid trash list from the data manager",
-			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", goodJSON, ""},
-			http.StatusOK,
-			"Received 3 trash requests\n",
-		},
-		{
-			"Invalid trash list from the data manager",
-			RequestTester{"/trash", s.cluster.SystemRootToken, "PUT", badJSON, ""},
-			http.StatusBadRequest,
-			"",
-		},
-	}
-
-	for _, tst := range testcases {
-		response := IssueRequest(s.handler, &tst.req)
-		ExpectStatusCode(c, tst.name, tst.responseCode, response)
-		ExpectBody(c, tst.name, tst.responseBody, response)
-	}
-
-	// The trash collector should have received one good list with 3
-	// requests on it.
-	for i := 0; i < 3; i++ {
-		item := <-trashq.NextItem
-		if _, ok := item.(TrashRequest); !ok {
-			c.Errorf("item %v could not be parsed as a TrashRequest", item)
-		}
-	}
-
-	expectChannelEmpty(c, trashq.NextItem)
-}
-
-// ====================
-// Helper functions
-// ====================
-
-// IssueTestRequest executes an HTTP request described by rt, to a
-// REST router.  It returns the HTTP response to the request.
-func IssueRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
-	response := httptest.NewRecorder()
-	body := bytes.NewReader(rt.requestBody)
-	req, _ := http.NewRequest(rt.method, rt.uri, body)
-	if rt.apiToken != "" {
-		req.Header.Set("Authorization", "OAuth2 "+rt.apiToken)
-	}
-	if rt.storageClasses != "" {
-		req.Header.Set("X-Keep-Storage-Classes", rt.storageClasses)
-	}
-	handler.ServeHTTP(response, req)
-	return response
-}
-
-func IssueHealthCheckRequest(handler http.Handler, rt *RequestTester) *httptest.ResponseRecorder {
-	response := httptest.NewRecorder()
-	body := bytes.NewReader(rt.requestBody)
-	req, _ := http.NewRequest(rt.method, rt.uri, body)
-	if rt.apiToken != "" {
-		req.Header.Set("Authorization", "Bearer "+rt.apiToken)
-	}
-	handler.ServeHTTP(response, req)
-	return response
-}
-
-// ExpectStatusCode checks whether a response has the specified status code,
-// and reports a test failure if not.
-func ExpectStatusCode(
-	c *check.C,
-	testname string,
-	expectedStatus int,
-	response *httptest.ResponseRecorder) {
-	c.Check(response.Code, check.Equals, expectedStatus, check.Commentf("%s", testname))
-}
-
-func ExpectBody(
-	c *check.C,
-	testname string,
-	expectedBody string,
-	response *httptest.ResponseRecorder) {
-	if expectedBody != "" && response.Body.String() != expectedBody {
-		c.Errorf("%s: expected response body '%s', got %+v",
-			testname, expectedBody, response)
-	}
-}
-
-// See #7121
-func (s *RouterSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	defer func(orig *bufferPool) {
-		bufs = orig
-	}(bufs)
-	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
-
-	ok := make(chan struct{})
-	go func() {
-		for i := 0; i < 2; i++ {
-			response := IssueRequest(s.handler,
-				&RequestTester{
-					method:      "PUT",
-					uri:         "/" + TestHash,
-					requestBody: TestBlock,
-				})
-			ExpectStatusCode(c,
-				"TestPutNeedsOnlyOneBuffer", http.StatusOK, response)
+		c.Logf("=== %s", trial)
+		methodpath := strings.Split(trial, " ")
+		req := httptest.NewRequest(methodpath[0], "http://example"+methodpath[1], nil)
+		resp := httptest.NewRecorder()
+		router.ServeHTTP(resp, req)
+		c.Check(resp.Code, Equals, http.StatusBadRequest)
+	}
+}
+
+func (s *routerSuite) TestRequireAdminMgtToken(c *C) {
+	router, cancel := testRouter(c, s.cluster)
+	defer cancel()
+
+	for _, token := range []string{"badtoken", ""} {
+		for _, path := range []string{
+			"/pull",
+			"/trash",
+			"/index",
+			"/index/",
+			"/index/1234",
+			"/untrash/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+			"/debug.json",
+			"/status.json",
+		} {
+			c.Logf("=== %s", path)
+			req := httptest.NewRequest("GET", "http://example"+path, nil)
+			if token != "" {
+				req.Header.Set("Authorization", "Bearer "+token)
+			}
+			resp := httptest.NewRecorder()
+			router.ServeHTTP(resp, req)
+			c.Check(resp.Code, Equals, http.StatusUnauthorized)
 		}
-		ok <- struct{}{}
-	}()
-
-	select {
-	case <-ok:
-	case <-time.After(time.Second):
-		c.Fatal("PUT deadlocks with MaxKeepBlobBuffers==1")
 	}
-}
-
-// Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *RouterSuite) TestPutHandlerNoBufferleak(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	ok := make(chan bool)
-	go func() {
-		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
-			// Unauthenticated request, no server key
-			// => OK (unsigned response)
-			unsignedLocator := "/" + TestHash
-			response := IssueRequest(s.handler,
-				&RequestTester{
-					method:      "PUT",
-					uri:         unsignedLocator,
-					requestBody: TestBlock,
-				})
-			ExpectStatusCode(c,
-				"TestPutHandlerBufferleak", http.StatusOK, response)
-			ExpectBody(c,
-				"TestPutHandlerBufferleak",
-				TestHashPutResp, response)
-		}
-		ok <- true
-	}()
-	select {
-	case <-time.After(20 * time.Second):
-		// If the buffer pool leaks, the test goroutine hangs.
-		c.Fatal("test did not finish, assuming pool leaked")
-	case <-ok:
-	}
-}
-
-func (s *RouterSuite) TestGetHandlerClientDisconnect(c *check.C) {
-	s.cluster.Collections.BlobSigning = false
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	defer func(orig *bufferPool) {
-		bufs = orig
-	}(bufs)
-	bufs = newBufferPool(ctxlog.TestLogger(c), 1, BlockSize)
-	defer bufs.Put(bufs.Get(BlockSize))
-
-	err := s.handler.volmgr.AllWritable()[0].Put(context.Background(), TestHash, TestBlock)
-	c.Assert(err, check.IsNil)
-
+	req := httptest.NewRequest("TOUCH", "http://example/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", nil)
 	resp := httptest.NewRecorder()
-	ok := make(chan struct{})
-	go func() {
-		ctx, cancel := context.WithCancel(context.Background())
-		req, _ := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("/%s+%d", TestHash, len(TestBlock)), nil)
-		cancel()
-		s.handler.ServeHTTP(resp, req)
-		ok <- struct{}{}
-	}()
-
-	select {
-	case <-time.After(20 * time.Second):
-		c.Fatal("request took >20s, close notifier must be broken")
-	case <-ok:
-	}
-
-	ExpectStatusCode(c, "client disconnect", http.StatusServiceUnavailable, resp)
-	for i, v := range s.handler.volmgr.AllWritable() {
-		if calls := v.Volume.(*MockVolume).called["GET"]; calls != 0 {
-			c.Errorf("volume %d got %d calls, expected 0", i, calls)
-		}
-	}
+	router.ServeHTTP(resp, req)
+	c.Check(resp.Code, Equals, http.StatusUnauthorized)
 }
 
-// Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
-// leak.
-func (s *RouterSuite) TestGetHandlerNoBufferLeak(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	vols := s.handler.volmgr.AllWritable()
-	if err := vols[0].Put(context.Background(), TestHash, TestBlock); err != nil {
-		c.Error(err)
-	}
-
-	ok := make(chan bool)
-	go func() {
-		for i := 0; i < s.cluster.API.MaxKeepBlobBuffers+1; i++ {
-			// Unauthenticated request, unsigned locator
-			// => OK
-			unsignedLocator := "/" + TestHash
-			response := IssueRequest(s.handler,
-				&RequestTester{
-					method: "GET",
-					uri:    unsignedLocator,
-				})
-			ExpectStatusCode(c,
-				"Unauthenticated request, unsigned locator", http.StatusOK, response)
-			ExpectBody(c,
-				"Unauthenticated request, unsigned locator",
-				string(TestBlock),
-				response)
-		}
-		ok <- true
-	}()
-	select {
-	case <-time.After(20 * time.Second):
-		// If the buffer pool leaks, the test goroutine hangs.
-		c.Fatal("test did not finish, assuming pool leaked")
-	case <-ok:
-	}
+func (s *routerSuite) TestVolumeErrorStatusCode(c *C) {
+	c.Fatal("todo: volume operation fails 'busy', router returns 5xx")
 }
 
-func (s *RouterSuite) TestPutStorageClasses(c *check.C) {
-	s.cluster.Volumes = map[string]arvados.Volume{
-		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
-		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
-		"zzzzz-nyw5e-222222222222222": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"readonly": true}, ReadOnly: true},
-	}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	rt := RequestTester{
-		method:      "PUT",
-		uri:         "/" + TestHash,
-		requestBody: TestBlock,
-	}
-
-	for _, trial := range []struct {
-		ask    string
-		expect string
-	}{
-		{"", ""},
-		{"default", "default=1"},
-		{" , default , default , ", "default=1"},
-		{"special", "extra=1, special=1"},
-		{"special, readonly", "extra=1, special=1"},
-		{"special, nonexistent", "extra=1, special=1"},
-		{"extra, special", "extra=1, special=1"},
-		{"default, special", "default=1, extra=1, special=1"},
-	} {
-		c.Logf("success case %#v", trial)
-		rt.storageClasses = trial.ask
-		resp := IssueRequest(s.handler, &rt)
-		if trial.expect == "" {
-			// any non-empty value is correct
-			c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Not(check.Equals), "")
-		} else {
-			c.Check(sortCommaSeparated(resp.Header().Get("X-Keep-Storage-Classes-Confirmed")), check.Equals, trial.expect)
-		}
-	}
-
-	for _, trial := range []struct {
-		ask string
-	}{
-		{"doesnotexist"},
-		{"doesnotexist, readonly"},
-		{"readonly"},
-	} {
-		c.Logf("failure case %#v", trial)
-		rt.storageClasses = trial.ask
-		resp := IssueRequest(s.handler, &rt)
-		c.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)
-	}
-}
-
-func sortCommaSeparated(s string) string {
-	slice := strings.Split(s, ", ")
-	sort.Strings(slice)
-	return strings.Join(slice, ", ")
-}
-
-func (s *RouterSuite) TestPutResponseHeader(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	resp := IssueRequest(s.handler, &RequestTester{
-		method:      "PUT",
-		uri:         "/" + TestHash,
-		requestBody: TestBlock,
-	})
-	c.Logf("%#v", resp)
-	c.Check(resp.Header().Get("X-Keep-Replicas-Stored"), check.Equals, "1")
-	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
-}
-
-func (s *RouterSuite) TestUntrashHandler(c *check.C) {
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	// Set up Keep volumes
-	vols := s.handler.volmgr.AllWritable()
-	vols[0].Put(context.Background(), TestHash, TestBlock)
-
-	s.cluster.SystemRootToken = "DATA MANAGER TOKEN"
-
-	// unauthenticatedReq => UnauthorizedError
-	unauthenticatedReq := &RequestTester{
-		method: "PUT",
-		uri:    "/untrash/" + TestHash,
-	}
-	response := IssueRequest(s.handler, unauthenticatedReq)
-	ExpectStatusCode(c,
-		"Unauthenticated request",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// notDataManagerReq => UnauthorizedError
-	notDataManagerReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/" + TestHash,
-		apiToken: knownToken,
-	}
-
-	response = IssueRequest(s.handler, notDataManagerReq)
-	ExpectStatusCode(c,
-		"Non-datamanager token",
-		UnauthorizedError.HTTPCode,
-		response)
-
-	// datamanagerWithBadHashReq => StatusBadRequest
-	datamanagerWithBadHashReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/thisisnotalocator",
-		apiToken: s.cluster.SystemRootToken,
-	}
-	response = IssueRequest(s.handler, datamanagerWithBadHashReq)
-	ExpectStatusCode(c,
-		"Bad locator in untrash request",
-		http.StatusBadRequest,
-		response)
-
-	// datamanagerWrongMethodReq => StatusBadRequest
-	datamanagerWrongMethodReq := &RequestTester{
-		method:   "GET",
-		uri:      "/untrash/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
-	}
-	response = IssueRequest(s.handler, datamanagerWrongMethodReq)
-	ExpectStatusCode(c,
-		"Only PUT method is supported for untrash",
-		http.StatusMethodNotAllowed,
-		response)
-
-	// datamanagerReq => StatusOK
-	datamanagerReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
-	}
-	response = IssueRequest(s.handler, datamanagerReq)
-	ExpectStatusCode(c,
-		"",
-		http.StatusOK,
-		response)
-	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
-}
-
-func (s *RouterSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
-	// Change all volumes to read-only
-	for uuid, v := range s.cluster.Volumes {
-		v.ReadOnly = true
-		s.cluster.Volumes[uuid] = v
-	}
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-
-	// datamanagerReq => StatusOK
-	datamanagerReq := &RequestTester{
-		method:   "PUT",
-		uri:      "/untrash/" + TestHash,
-		apiToken: s.cluster.SystemRootToken,
-	}
-	response := IssueRequest(s.handler, datamanagerReq)
-	ExpectStatusCode(c,
-		"No writable volumes",
-		http.StatusNotFound,
-		response)
-}
-
-func (s *RouterSuite) TestHealthCheckPing(c *check.C) {
-	s.cluster.ManagementToken = arvadostest.ManagementToken
-	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
-	pingReq := &RequestTester{
-		method:   "GET",
-		uri:      "/_health/ping",
-		apiToken: arvadostest.ManagementToken,
-	}
-	response := IssueHealthCheckRequest(s.handler, pingReq)
-	ExpectStatusCode(c,
-		"",
-		http.StatusOK,
-		response)
-	want := `{"health":"OK"}`
-	if !strings.Contains(response.Body.String(), want) {
-		c.Errorf("expected response to include %s: got %s", want, response.Body.String())
-	}
+func (s *routerSuite) TestCancelOnDisconnect(c *C) {
+	c.Fatal("todo: volume operation context is cancelled when client disconnects")
 }
diff --git a/services/keepstore/trash_worker.go b/services/keepstore/trash_worker.go
index 7ab0019b0d..7caae1391e 100644
--- a/services/keepstore/trash_worker.go
+++ b/services/keepstore/trash_worker.go
@@ -60,6 +60,16 @@ func (t *trasher) SetTrashList(newlist []trashListItem) {
 }
 
 func (t *trasher) runWorker() {
+	var mntsAllowTrash []*mount
+	for _, mnt := range t.keepstore.mounts {
+		if mnt.AllowTrash {
+			mntsAllowTrash = append(mntsAllowTrash, mnt)
+		}
+	}
+	if len(mntsAllowTrash) == 0 {
+		t.keepstore.logger.Info("not running trash worker because there are no writable or trashable volumes")
+		return
+	}
 	for {
 		t.cond.L.Lock()
 		for len(t.todo) == 0 {
@@ -83,7 +93,7 @@ func (t *trasher) runWorker() {
 
 		var mnts []*mount
 		if item.MountUUID == "" {
-			mnts = t.keepstore.mountsW
+			mnts = mntsAllowTrash
 		} else if mnt := t.keepstore.mounts[item.MountUUID]; mnt == nil {
 			logger.Warnf("ignoring trash request for nonexistent mount %s", item.MountUUID)
 			continue
diff --git a/services/keepstore/trash_worker_test.go b/services/keepstore/trash_worker_test.go
index a1648c52cc..1c01051191 100644
--- a/services/keepstore/trash_worker_test.go
+++ b/services/keepstore/trash_worker_test.go
@@ -36,9 +36,18 @@ type TrashWorkerTestData struct {
 	ExpectLocator2 bool
 }
 
-// Delete block that does not exist in any of the keep volumes.
-// Expect no errors.
-func (s *HandlerSuite) TestTrashWorkerIntegration_GetNonExistingLocator(c *check.C) {
+func (s *routerSuite) TestTrashList_Clear(c *check.C) {
+	c.Fatal("todo")
+}
+
+func (s *routerSuite) TestTrashList_Execute(c *check.C) {
+	c.Fatal("todo: trash nonexistent block")
+	c.Fatal("todo: trash existing block")
+	c.Fatal("todo: trash block on only one volume")
+	c.Fatal("todo: trash block on volume with AllowTrash=false")
+}
+
+func (s *routerSuite) TestTrashList_Clear(c *check.C) {
 	s.cluster.Collections.BlobTrash = true
 	testData := TrashWorkerTestData{
 		Locator1: "5d41402abc4b2a76b9719d911017c592",
diff --git a/services/keepstore/volume_test.go b/services/keepstore/volume_test.go
index b353f12ef6..9162e4b057 100644
--- a/services/keepstore/volume_test.go
+++ b/services/keepstore/volume_test.go
@@ -5,10 +5,7 @@
 package keepstore
 
 import (
-	"bytes"
 	"context"
-	"crypto/md5"
-	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -69,9 +66,8 @@ type MockVolume struct {
 	Store      map[string][]byte
 	Timestamps map[string]time.Time
 
-	// Bad volumes return an error for every operation.
-	Bad            bool
-	BadVolumeError error
+	// If non-nil, all operations (except status) return Err.
+	Err error
 
 	// Touchable volumes' Touch() method succeeds for a locator
 	// that has been BlockWrite().
@@ -96,7 +92,7 @@ type MockVolume struct {
 
 // newMockVolume returns a non-Bad, non-Readonly, Touchable mock
 // volume.
-func newMockVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (volume, error) {
+func newMockVolume(params newVolumeParams) (volume, error) {
 	gate := make(chan struct{})
 	close(gate)
 	return &MockVolume{
@@ -106,10 +102,10 @@ func newMockVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logru
 		Touchable:  true,
 		called:     map[string]int{},
 		Gate:       gate,
-		cluster:    cluster,
-		volume:     volume,
-		logger:     logger,
-		metrics:    metrics,
+		cluster:    params.Cluster,
+		volume:     params.ConfigVolume,
+		logger:     params.Logger,
+		metrics:    params.MetricsVecs,
 	}, nil
 }
 
@@ -134,93 +130,79 @@ func (v *MockVolume) gotCall(method string) {
 	}
 }
 
-func (v *MockVolume) Compare(ctx context.Context, loc string, buf []byte) error {
-	v.gotCall("Compare")
+func (v *MockVolume) BlockRead(ctx context.Context, hash string, writeTo io.Writer) (int, error) {
+	v.gotCall("BlockRead")
 	<-v.Gate
-	if v.Bad {
-		return v.BadVolumeError
-	} else if block, ok := v.Store[loc]; ok {
-		if fmt.Sprintf("%x", md5.Sum(block)) != loc {
-			return DiskHashError
-		}
-		if bytes.Compare(buf, block) != 0 {
-			return CollisionError
-		}
-		return nil
-	} else {
-		return os.ErrNotExist
+	if v.Err != nil {
+		return 0, v.Err
 	}
-}
-
-func (v *MockVolume) Get(ctx context.Context, loc string, buf []byte) (int, error) {
-	v.gotCall("Get")
-	<-v.Gate
-	if v.Bad {
-		return 0, v.BadVolumeError
-	} else if block, ok := v.Store[loc]; ok {
-		copy(buf[:len(block)], block)
-		return len(block), nil
+	if data, ok := v.Store[hash]; ok {
+		return writeTo.Write(data)
 	}
 	return 0, os.ErrNotExist
 }
 
-func (v *MockVolume) BlockWrite(ctx context.Context, loc string, block []byte) error {
+func (v *MockVolume) BlockWrite(ctx context.Context, hash string, data []byte) error {
 	v.gotCall("BlockWrite")
 	<-v.Gate
-	if v.Bad {
-		return v.BadVolumeError
+	if v.Err != nil {
+		return v.Err
 	}
 	if v.volume.ReadOnly {
-		return MethodDisabledError
+		return errMethodNotAllowed
 	}
-	v.Store[loc] = block
-	return v.Touch(loc)
+	v.mutex.Lock()
+	defer v.mutex.Unlock()
+	v.Store[hash] = data
+	v.Timestamps[hash] = time.Now()
+	return nil
 }
 
-func (v *MockVolume) Touch(loc string) error {
-	return v.TouchWithDate(loc, time.Now())
+func (v *MockVolume) Touch(hash string) error {
+	return v.TouchWithDate(hash, time.Now())
 }
 
-func (v *MockVolume) TouchWithDate(loc string, t time.Time) error {
+func (v *MockVolume) TouchWithDate(hash string, t time.Time) error {
 	v.gotCall("Touch")
 	<-v.Gate
 	if v.volume.ReadOnly {
-		return MethodDisabledError
+		return errMethodNotAllowed
 	}
-	if _, exists := v.Store[loc]; !exists {
+	v.mutex.Lock()
+	defer v.mutex.Unlock()
+	if _, exists := v.Store[hash]; !exists {
 		return os.ErrNotExist
 	}
-	if v.Touchable {
-		v.Timestamps[loc] = t
-		return nil
-	}
-	return errors.New("Touch failed")
+	v.Timestamps[hash] = t
+	return nil
 }
 
 func (v *MockVolume) Mtime(loc string) (time.Time, error) {
 	v.gotCall("Mtime")
 	<-v.Gate
-	var mtime time.Time
-	var err error
-	if v.Bad {
-		err = v.BadVolumeError
-	} else if t, ok := v.Timestamps[loc]; ok {
-		mtime = t
+	if v.Err != nil {
+		return time.Time{}, v.Err
+	}
+	if t, ok := v.Timestamps[loc]; !ok {
+		return time.Time{}, os.ErrNotExist
 	} else {
-		err = os.ErrNotExist
+		return t, nil
 	}
-	return mtime, err
 }
 
-func (v *MockVolume) IndexTo(prefix string, w io.Writer) error {
+func (v *MockVolume) Index(ctx context.Context, prefix string, w io.Writer) error {
 	v.gotCall("IndexTo")
 	<-v.Gate
-	for loc, block := range v.Store {
-		if !IsValidLocator(loc) || !strings.HasPrefix(loc, prefix) {
+	v.mutex.Lock()
+	defer v.mutex.Unlock()
+	for hash, data := range v.Store {
+		if err := ctx.Err(); err != nil {
+			return err
+		}
+		if !IsValidLocator(hash) || !strings.HasPrefix(hash, prefix) {
 			continue
 		}
-		_, err := fmt.Fprintf(w, "%s+%d %d\n",
-			loc, len(block), 123456789)
+		_, err := fmt.Fprintf(w, "%s+%d %d\n", hash, len(data), v.Timestamps[hash].Unix())
 		if err != nil {
 			return err
 		}
@@ -228,27 +210,34 @@ func (v *MockVolume) IndexTo(prefix string, w io.Writer) error {
 	return nil
 }
 
-func (v *MockVolume) Trash(loc string) error {
+func (v *MockVolume) Trash(hash string) error {
 	v.gotCall("Delete")
 	<-v.Gate
 	if v.volume.ReadOnly {
-		return MethodDisabledError
+		return errMethodNotAllowed
 	}
-	if _, ok := v.Store[loc]; ok {
-		if time.Since(v.Timestamps[loc]) < time.Duration(v.cluster.Collections.BlobSigningTTL) {
-			return nil
-		}
-		delete(v.Store, loc)
+	if _, ok := v.Store[hash]; !ok {
+		return os.ErrNotExist
+	}
+	if time.Since(v.Timestamps[hash]) < time.Duration(v.cluster.Collections.BlobSigningTTL) {
 		return nil
 	}
-	return os.ErrNotExist
+	delete(v.Store, hash)
+	delete(v.Timestamps, hash)
+	return nil
 }
 
-func (v *MockVolume) GetDeviceID() string {
+func (v *MockVolume) DeviceID() string {
 	return "mock-device-id"
 }
 
-func (v *MockVolume) Untrash(loc string) error {
+func (v *MockVolume) Untrash(hash string) error {
+	v.mutex.Lock()
+	defer v.mutex.Unlock()
+	data, ok := v.Trash[hash]
+	if !ok {
+		return os.ErrNotExist
+	}
 	return nil
 }
 

commit cdda4e54d99a079773cbf68b005bdbf0a977408b
Author: Tom Clegg <tom at curii.com>
Date:   Mon Jan 29 14:04:03 2024 -0500

    2960: Update tests.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>

diff --git a/services/keepstore/azure_blob_volume_test.go b/services/keepstore/azure_blob_volume_test.go
index 48d58ee9bf..7e00db1d93 100644
--- a/services/keepstore/azure_blob_volume_test.go
+++ b/services/keepstore/azure_blob_volume_test.go
@@ -13,6 +13,7 @@ import (
 	"encoding/xml"
 	"flag"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"math/rand"
 	"net"
@@ -87,7 +88,7 @@ func (h *azStubHandler) TouchWithDate(container, hash string, t time.Time) {
 	blob.Mtime = t
 }
 
-func (h *azStubHandler) PutRaw(container, hash string, data []byte) {
+func (h *azStubHandler) BlockWriteRaw(container, hash string, data []byte) {
 	h.Lock()
 	defer h.Unlock()
 	h.blobs[container+"|"+hash] = &azBlob{
@@ -478,16 +479,16 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeRangeFenceposts(c *check.C) {
 			data[i] = byte((i + 7) & 0xff)
 		}
 		hash := fmt.Sprintf("%x", md5.Sum(data))
-		err := v.Put(context.Background(), hash, data)
+		err := v.BlockWrite(context.Background(), hash, data)
 		if err != nil {
 			c.Error(err)
 		}
-		gotData := make([]byte, len(data))
-		gotLen, err := v.Get(context.Background(), hash, gotData)
+		gotData := bytes.NewBuffer(nil)
+		gotLen, err := v.BlockRead(context.Background(), hash, gotData)
 		if err != nil {
 			c.Error(err)
 		}
-		gotHash := fmt.Sprintf("%x", md5.Sum(gotData))
+		gotHash := fmt.Sprintf("%x", md5.Sum(gotData.Bytes()))
 		if gotLen != size {
 			c.Errorf("length mismatch: got %d != %d", gotLen, size)
 		}
@@ -508,29 +509,28 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRace(c *check.C) {
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		err := v.Put(context.Background(), TestHash, TestBlock)
+		err := v.BlockWrite(context.Background(), TestHash, TestBlock)
 		if err != nil {
 			c.Error(err)
 		}
 	}()
-	continuePut := make(chan struct{})
-	// Wait for the stub's Put to create the empty blob
-	v.azHandler.race <- continuePut
+	continueBlockWrite := make(chan struct{})
+	// Wait for the stub's BlockWrite to create the empty blob
+	v.azHandler.race <- continueBlockWrite
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		buf := make([]byte, len(TestBlock))
-		_, err := v.Get(context.Background(), TestHash, buf)
+		_, err := v.BlockRead(context.Background(), TestHash, io.Discard)
 		if err != nil {
 			c.Error(err)
 		}
 	}()
-	// Wait for the stub's Get to get the empty blob
+	// Wait for the stub's BlockRead to get the empty blob
 	close(v.azHandler.race)
-	// Allow stub's Put to continue, so the real data is ready
-	// when the volume's Get retries
-	<-continuePut
-	// Wait for Get() and Put() to finish
+	// Allow stub's BlockWrite to continue, so the real data is ready
+	// when the volume's BlockRead retries
+	<-continueBlockWrite
+	// Wait for BlockRead() and BlockWrite() to finish
 	wg.Wait()
 }
 
@@ -540,10 +540,10 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *che
 	v.AzureBlobVolume.WriteRacePollTime.Set("5ms")
 	defer v.Teardown()
 
-	v.PutRaw(TestHash, nil)
+	v.BlockWriteRaw(TestHash, nil)
 
 	buf := new(bytes.Buffer)
-	v.IndexTo("", buf)
+	v.Index(context.Background(), "", buf)
 	if buf.Len() != 0 {
 		c.Errorf("Index %+q should be empty", buf.Bytes())
 	}
@@ -553,47 +553,40 @@ func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeCreateBlobRaceDeadline(c *che
 	allDone := make(chan struct{})
 	go func() {
 		defer close(allDone)
-		buf := make([]byte, BlockSize)
-		n, err := v.Get(context.Background(), TestHash, buf)
+		buf := bytes.NewBuffer(nil)
+		n, err := v.BlockRead(context.Background(), TestHash, buf)
 		if err != nil {
 			c.Error(err)
 			return
 		}
 		if n != 0 {
-			c.Errorf("Got %+q, expected empty buf", buf[:n])
+			c.Errorf("Got %+q (n=%d), expected empty buf", buf.Bytes(), n)
 		}
 	}()
 	select {
 	case <-allDone:
 	case <-time.After(time.Second):
-		c.Error("Get should have stopped waiting for race when block was 2s old")
+		c.Error("BlockRead should have stopped waiting for race when block was 2s old")
 	}
 
 	buf.Reset()
-	v.IndexTo("", buf)
+	v.Index(context.Background(), "", buf)
 	if !bytes.HasPrefix(buf.Bytes(), []byte(TestHash+"+0")) {
 		c.Errorf("Index %+q should have %+q", buf.Bytes(), TestHash+"+0")
 	}
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelGet(c *check.C) {
+func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockRead(c *check.C) {
 	s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
-		v.PutRaw(TestHash, TestBlock)
-		_, err := v.Get(ctx, TestHash, make([]byte, BlockSize))
+		v.BlockWriteRaw(TestHash, TestBlock)
+		_, err := v.BlockRead(ctx, TestHash, io.Discard)
 		return err
 	})
 }
 
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelPut(c *check.C) {
+func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelBlockWrite(c *check.C) {
 	s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
-		return v.Put(ctx, TestHash, make([]byte, BlockSize))
-	})
-}
-
-func (s *StubbedAzureBlobSuite) TestAzureBlobVolumeContextCancelCompare(c *check.C) {
-	s.testAzureBlobVolumeContextCancel(c, func(ctx context.Context, v *TestableAzureBlobVolume) error {
-		v.PutRaw(TestHash, TestBlock)
-		return v.Compare(ctx, TestHash, TestBlock2)
+		return v.BlockWrite(ctx, TestHash, make([]byte, BlockSize))
 	})
 }
 
@@ -647,31 +640,31 @@ func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
 	c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
 
 	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
-	_, err := volume.Get(context.Background(), loc, make([]byte, 3))
+	_, err := volume.BlockRead(context.Background(), loc, io.Discard)
 	c.Check(err, check.NotNil)
 	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
 	c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
 	c.Check(stats(), check.Matches, `.*"storage\.AzureStorageServiceError 404 \(404 Not Found\)":[^0].*`)
 	c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
 
-	err = volume.Put(context.Background(), loc, []byte("foo"))
+	err = volume.BlockWrite(context.Background(), loc, []byte("foo"))
 	c.Check(err, check.IsNil)
 	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
 	c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
 
-	_, err = volume.Get(context.Background(), loc, make([]byte, 3))
+	_, err = volume.BlockRead(context.Background(), loc, io.Discard)
 	c.Check(err, check.IsNil)
-	_, err = volume.Get(context.Background(), loc, make([]byte, 3))
+	_, err = volume.BlockRead(context.Background(), loc, io.Discard)
 	c.Check(err, check.IsNil)
 	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
 }
 
-func (v *TestableAzureBlobVolume) PutRaw(locator string, data []byte) {
-	v.azHandler.PutRaw(v.ContainerName, locator, data)
+func (v *TestableAzureBlobVolume) BlockWriteRaw(locator string, data []byte) {
+	v.azHandler.BlockWriteRaw(v.ContainerName, locator, data)
 }
 
-func (v *TestableAzureBlobVolume) TouchWithDate(locator string, lastPut time.Time) {
-	v.azHandler.TouchWithDate(v.ContainerName, locator, lastPut)
+func (v *TestableAzureBlobVolume) TouchWithDate(locator string, lastBlockWrite time.Time) {
+	v.azHandler.TouchWithDate(v.ContainerName, locator, lastBlockWrite)
 }
 
 func (v *TestableAzureBlobVolume) Teardown() {
diff --git a/services/keepstore/bufferpool_test.go b/services/keepstore/bufferpool_test.go
index c348c7765c..8ecc833228 100644
--- a/services/keepstore/bufferpool_test.go
+++ b/services/keepstore/bufferpool_test.go
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"git.arvados.org/arvados.git/sdk/go/ctxlog"
+	"github.com/prometheus/client_golang/prometheus"
 	. "gopkg.in/check.v1"
 )
 
@@ -17,7 +18,7 @@ var bufferPoolTestSize = 10
 
 type BufferPoolSuite struct{}
 
-func (s *BufferPoolSuite) TearDownTest(c *C) {
+func (s *BufferPoolSuite) SetUpTest(c *C) {
 	bufferPoolBlockSize = bufferPoolTestSize
 }
 
@@ -26,7 +27,7 @@ func (s *BufferPoolSuite) TearDownTest(c *C) {
 }
 
 func (s *BufferPoolSuite) TestBufferPoolBufSize(c *C) {
-	bufs := newBufferPool(ctxlog.TestLogger(c), 2)
+	bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
 	b1 := bufs.Get()
 	bufs.Get()
 	bufs.Put(b1)
@@ -35,14 +36,14 @@ func (s *BufferPoolSuite) TestBufferPoolBufSize(c *C) {
 }
 
 func (s *BufferPoolSuite) TestBufferPoolUnderLimit(c *C) {
-	bufs := newBufferPool(ctxlog.TestLogger(c), 3)
+	bufs := newBufferPool(ctxlog.TestLogger(c), 3, prometheus.NewRegistry())
 	b1 := bufs.Get()
 	bufs.Get()
 	testBufferPoolRace(c, bufs, b1, "Get")
 }
 
 func (s *BufferPoolSuite) TestBufferPoolAtLimit(c *C) {
-	bufs := newBufferPool(ctxlog.TestLogger(c), 2)
+	bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
 	b1 := bufs.Get()
 	bufs.Get()
 	testBufferPoolRace(c, bufs, b1, "Put")
@@ -66,7 +67,7 @@ func testBufferPoolRace(c *C, bufs *bufferPool, unused []byte, expectWin string)
 }
 
 func (s *BufferPoolSuite) TestBufferPoolReuse(c *C) {
-	bufs := newBufferPool(ctxlog.TestLogger(c), 2)
+	bufs := newBufferPool(ctxlog.TestLogger(c), 2, prometheus.NewRegistry())
 	bufs.Get()
 	last := bufs.Get()
 	// The buffer pool is allowed to throw away unused buffers
diff --git a/services/keepstore/proxy_remote_test.go b/services/keepstore/proxy_remote_test.go
index 534371cc0e..923d1b805e 100644
--- a/services/keepstore/proxy_remote_test.go
+++ b/services/keepstore/proxy_remote_test.go
@@ -29,7 +29,7 @@ var _ = check.Suite(&ProxyRemoteSuite{})
 
 type ProxyRemoteSuite struct {
 	cluster *arvados.Cluster
-	handler *handler
+	handler *router
 
 	remoteClusterID      string
 	remoteBlobSigningKey []byte
diff --git a/services/keepstore/pull_worker_integration_test.go b/services/keepstore/pull_worker_integration_test.go
index 3855b4ecd3..b445f2f082 100644
--- a/services/keepstore/pull_worker_integration_test.go
+++ b/services/keepstore/pull_worker_integration_test.go
@@ -25,7 +25,7 @@ type PullWorkIntegrationTestData struct {
 	GetError string
 }
 
-func (s *HandlerSuite) setupPullWorkerIntegrationTest(c *check.C, testData PullWorkIntegrationTestData, wantData bool) PullRequest {
+func (s *HandlerSuite) setupPullWorkerIntegrationTest(c *check.C, testData PullWorkIntegrationTestData, wantData bool) pullListItem {
 	arvadostest.StartKeep(2, false)
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 	// Put content if the test needs it
@@ -39,11 +39,9 @@ func (s *HandlerSuite) setupPullWorkerIntegrationTest(c *check.C, testData PullW
 		}
 	}
 
-	// Create pullRequest for the test
-	pullRequest := PullRequest{
+	return pullListItem{
 		Locator: testData.Locator,
 	}
-	return pullRequest
 }
 
 // Do a get on a block that is not existing in any of the keep servers.
@@ -83,7 +81,7 @@ func (s *HandlerSuite) TestPullWorkerIntegration_GetExistingLocator(c *check.C)
 // Perform the test.
 // The test directly invokes the "PullItemAndProcess" rather than
 // putting an item on the pullq so that the errors can be verified.
-func (s *HandlerSuite) performPullWorkerIntegrationTest(testData PullWorkIntegrationTestData, pullRequest PullRequest, c *check.C) {
+func (s *HandlerSuite) performPullWorkerIntegrationTest(testData PullWorkIntegrationTestData, item pullListItem, c *check.C) {
 
 	// Override writePulledBlock to mock PutBlock functionality
 	defer func(orig func(*RRVolumeManager, Volume, []byte, string) error) { writePulledBlock = orig }(writePulledBlock)
@@ -104,7 +102,7 @@ func (s *HandlerSuite) performPullWorkerIntegrationTest(testData PullWorkIntegra
 		return rdr, int64(len(testData.Content)), "", nil
 	}
 
-	err := s.handler.pullItemAndProcess(pullRequest)
+	err := s.handler.pullItemAndProcess(item)
 
 	if len(testData.GetError) > 0 {
 		if (err == nil) || (!strings.Contains(err.Error(), testData.GetError)) {
diff --git a/services/keepstore/pull_worker_test.go b/services/keepstore/pull_worker_test.go
index 2626e66d88..fa11d47508 100644
--- a/services/keepstore/pull_worker_test.go
+++ b/services/keepstore/pull_worker_test.go
@@ -24,7 +24,7 @@ var _ = Suite(&PullWorkerTestSuite{})
 
 type PullWorkerTestSuite struct {
 	cluster *arvados.Cluster
-	handler *handler
+	handler *router
 
 	testPullLists map[string]string
 	readContent   string
diff --git a/services/keepstore/handler_test.go b/services/keepstore/router_test.go
similarity index 96%
rename from services/keepstore/handler_test.go
rename to services/keepstore/router_test.go
index 5bdafb77c2..73cddcff26 100644
--- a/services/keepstore/handler_test.go
+++ b/services/keepstore/router_test.go
@@ -2,15 +2,6 @@
 //
 // SPDX-License-Identifier: AGPL-3.0
 
-// Tests for Keep HTTP handlers:
-//
-// - GetBlockHandler
-// - PutBlockHandler
-// - IndexHandler
-//
-// The HTTP handlers are responsible for enforcing permission policy,
-// so these tests must exercise all possible permission permutations.
-
 package keepstore
 
 import (
@@ -55,18 +46,18 @@ func testCluster(t TB) *arvados.Cluster {
 
 var _ = check.Suite(&HandlerSuite{})
 
-type HandlerSuite struct {
+type RouterSuite struct {
 	cluster *arvados.Cluster
-	handler *handler
+	handler *router
 }
 
-func (s *HandlerSuite) SetUpTest(c *check.C) {
+func (s *RouterSuite) SetUpTest(c *check.C) {
 	s.cluster = testCluster(c)
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"},
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock"},
 	}
-	s.handler = &handler{}
+	s.handler = newHandlerOrErrorHandler(context.Background(), s.cluster, s.cluster.SystemRootToken, prometheus.NewRegistry()).(*router)
 }
 
 // A RequestTester represents the parameters for an HTTP request to
@@ -86,7 +77,7 @@ type RequestTester struct {
 //   - permissions on, unauthenticated request, signed locator
 //   - permissions on, authenticated request, expired locator
 //   - permissions on, authenticated request, signed locator, transient error from backend
-func (s *HandlerSuite) TestGetHandler(c *check.C) {
+func (s *RouterSuite) TestGetHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	vols := s.handler.volmgr.AllWritable()
@@ -205,7 +196,7 @@ func (s *HandlerSuite) TestGetHandler(c *check.C) {
 //   - no server key
 //   - with server key, authenticated request, unsigned locator
 //   - with server key, unauthenticated request, unsigned locator
-func (s *HandlerSuite) TestPutHandler(c *check.C) {
+func (s *RouterSuite) TestPutHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	// --------------
@@ -275,7 +266,7 @@ func (s *HandlerSuite) TestPutHandler(c *check.C) {
 		TestHashPutResp, response)
 }
 
-func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
+func (s *RouterSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
 	s.cluster.Volumes["zzzzz-nyw5e-000000000000000"] = arvados.Volume{Driver: "mock", ReadOnly: true}
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -318,7 +309,7 @@ func (s *HandlerSuite) TestPutAndDeleteSkipReadonlyVolumes(c *check.C) {
 	}
 }
 
-func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
+func (s *RouterSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
 			Driver:         "mock",
@@ -366,7 +357,7 @@ func (s *HandlerSuite) TestReadsOrderedByStorageClassPriority(c *check.C) {
 	}
 }
 
-func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
+func (s *RouterSuite) TestPutWithNoWritableVolumes(c *check.C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
 			Driver:         "mock",
@@ -386,7 +377,7 @@ func (s *HandlerSuite) TestPutWithNoWritableVolumes(c *check.C) {
 	c.Check(s.handler.volmgr.mountMap["zzzzz-nyw5e-111111111111111"].Volume.(*MockVolume).CallCount("Put"), check.Equals, 0)
 }
 
-func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
+func (s *RouterSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-111111111111111": {
 			Driver:         "mock",
@@ -455,7 +446,7 @@ func (s *HandlerSuite) TestConcurrentWritesToMultipleStorageClasses(c *check.C)
 }
 
 // Test TOUCH requests.
-func (s *HandlerSuite) TestTouchHandler(c *check.C) {
+func (s *RouterSuite) TestTouchHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 	vols := s.handler.volmgr.AllWritable()
 	vols[0].Put(context.Background(), TestHash, TestBlock)
@@ -515,7 +506,7 @@ func (s *HandlerSuite) TestTouchHandler(c *check.C) {
 //
 // The only /index requests that should succeed are those issued by the
 // superuser. They should pass regardless of the value of BlobSigning.
-func (s *HandlerSuite) TestIndexHandler(c *check.C) {
+func (s *RouterSuite) TestIndexHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	// Include multiple blocks on different volumes, and
@@ -691,7 +682,7 @@ func (s *HandlerSuite) TestIndexHandler(c *check.C) {
 //	* Delete block on read-only volume only
 //	  (test for 200 OK, response with copies_deleted=0, copies_failed=1,
 //	  confirm block not deleted)
-func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
+func (s *RouterSuite) TestDeleteHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	vols := s.handler.volmgr.AllWritable()
@@ -838,7 +829,7 @@ func (s *HandlerSuite) TestDeleteHandler(c *check.C) {
 // TODO(twp): test concurrency: launch 100 goroutines to update the
 // pull list simultaneously.  Make sure that none of them return 400
 // Bad Request and that pullq.GetList() returns a valid list.
-func (s *HandlerSuite) TestPullHandler(c *check.C) {
+func (s *RouterSuite) TestPullHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	// Replace the router's pullq -- which the worker goroutines
@@ -952,7 +943,7 @@ func (s *HandlerSuite) TestPullHandler(c *check.C) {
 // TODO(twp): test concurrency: launch 100 goroutines to update the
 // pull list simultaneously.  Make sure that none of them return 400
 // Bad Request and that replica.Dump() returns a valid list.
-func (s *HandlerSuite) TestTrashHandler(c *check.C) {
+func (s *RouterSuite) TestTrashHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 	// Replace the router's trashq -- which the worker goroutines
 	// started by setup() are now receiving from -- with a new
@@ -1085,7 +1076,7 @@ func ExpectBody(
 }
 
 // See #7121
-func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
+func (s *RouterSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	defer func(orig *bufferPool) {
@@ -1117,7 +1108,7 @@ func (s *HandlerSuite) TestPutNeedsOnlyOneBuffer(c *check.C) {
 
 // Invoke the PutBlockHandler a bunch of times to test for bufferpool resource
 // leak.
-func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
+func (s *RouterSuite) TestPutHandlerNoBufferleak(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	ok := make(chan bool)
@@ -1148,7 +1139,7 @@ func (s *HandlerSuite) TestPutHandlerNoBufferleak(c *check.C) {
 	}
 }
 
-func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
+func (s *RouterSuite) TestGetHandlerClientDisconnect(c *check.C) {
 	s.cluster.Collections.BlobSigning = false
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
@@ -1187,7 +1178,7 @@ func (s *HandlerSuite) TestGetHandlerClientDisconnect(c *check.C) {
 
 // Invoke the GetBlockHandler a bunch of times to test for bufferpool resource
 // leak.
-func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
+func (s *RouterSuite) TestGetHandlerNoBufferLeak(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	vols := s.handler.volmgr.AllWritable()
@@ -1223,7 +1214,7 @@ func (s *HandlerSuite) TestGetHandlerNoBufferLeak(c *check.C) {
 	}
 }
 
-func (s *HandlerSuite) TestPutStorageClasses(c *check.C) {
+func (s *RouterSuite) TestPutStorageClasses(c *check.C) {
 	s.cluster.Volumes = map[string]arvados.Volume{
 		"zzzzz-nyw5e-000000000000000": {Replication: 1, Driver: "mock"}, // "default" is implicit
 		"zzzzz-nyw5e-111111111111111": {Replication: 1, Driver: "mock", StorageClasses: map[string]bool{"special": true, "extra": true}},
@@ -1280,7 +1271,7 @@ func sortCommaSeparated(s string) string {
 	return strings.Join(slice, ", ")
 }
 
-func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
+func (s *RouterSuite) TestPutResponseHeader(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	resp := IssueRequest(s.handler, &RequestTester{
@@ -1293,7 +1284,7 @@ func (s *HandlerSuite) TestPutResponseHeader(c *check.C) {
 	c.Check(resp.Header().Get("X-Keep-Storage-Classes-Confirmed"), check.Equals, "default=1")
 }
 
-func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
+func (s *RouterSuite) TestUntrashHandler(c *check.C) {
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 
 	// Set up Keep volumes
@@ -1364,7 +1355,7 @@ func (s *HandlerSuite) TestUntrashHandler(c *check.C) {
 	c.Check(response.Body.String(), check.Equals, "Successfully untrashed on: [MockVolume], [MockVolume]\n")
 }
 
-func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
+func (s *RouterSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
 	// Change all volumes to read-only
 	for uuid, v := range s.cluster.Volumes {
 		v.ReadOnly = true
@@ -1385,7 +1376,7 @@ func (s *HandlerSuite) TestUntrashHandlerWithNoWritableVolumes(c *check.C) {
 		response)
 }
 
-func (s *HandlerSuite) TestHealthCheckPing(c *check.C) {
+func (s *RouterSuite) TestHealthCheckPing(c *check.C) {
 	s.cluster.ManagementToken = arvadostest.ManagementToken
 	c.Assert(s.handler.setup(context.Background(), s.cluster, "", prometheus.NewRegistry(), testServiceURL), check.IsNil)
 	pingReq := &RequestTester{
diff --git a/services/keepstore/s3aws_volume_test.go b/services/keepstore/s3aws_volume_test.go
index c7e2d485df..f322c5ec3b 100644
--- a/services/keepstore/s3aws_volume_test.go
+++ b/services/keepstore/s3aws_volume_test.go
@@ -58,7 +58,7 @@ type StubbedS3AWSSuite struct {
 	s3server *httptest.Server
 	metadata *httptest.Server
 	cluster  *arvados.Cluster
-	handler  *handler
+	handler  *router
 	volumes  []*TestableS3AWSVolume
 }
 
@@ -70,7 +70,7 @@ func (s *StubbedS3AWSSuite) SetUpTest(c *check.C) {
 		"zzzzz-nyw5e-000000000000000": {Driver: "S3"},
 		"zzzzz-nyw5e-111111111111111": {Driver: "S3"},
 	}
-	s.handler = &handler{}
+	s.handler = newHandlerOrErrorHandler(context.Background(), s.cluster, s.cluster.SystemRootToken, prometheus.NewRegistry()).(*router)
 }
 
 func (s *StubbedS3AWSSuite) TestGeneric(c *check.C) {
diff --git a/services/keepstore/status_test.go b/services/keepstore/status_test.go
index 80f98adb22..a617798ab5 100644
--- a/services/keepstore/status_test.go
+++ b/services/keepstore/status_test.go
@@ -14,7 +14,7 @@ import (
 
 // getStatusItem("foo","bar","baz") retrieves /status.json, decodes
 // the response body into resp, and returns resp["foo"]["bar"]["baz"].
-func getStatusItem(h *handler, keys ...string) interface{} {
+func getStatusItem(h *router, keys ...string) interface{} {
 	resp := IssueRequest(h, &RequestTester{"/status.json", "", "GET", nil, ""})
 	var s interface{}
 	json.NewDecoder(resp.Body).Decode(&s)
diff --git a/services/keepstore/streamwriterat_test.go b/services/keepstore/streamwriterat_test.go
index c9db3b6eaf..1769a9c205 100644
--- a/services/keepstore/streamwriterat_test.go
+++ b/services/keepstore/streamwriterat_test.go
@@ -8,7 +8,7 @@ import (
 	"bytes"
 	"sync"
 
-	_ "gopkg.in/check.v1"
+	. "gopkg.in/check.v1"
 )
 
 var _ = Suite(&streamWriterAtSuite{})
diff --git a/services/keepstore/volume_test.go b/services/keepstore/volume_test.go
index 950b3989aa..b353f12ef6 100644
--- a/services/keepstore/volume_test.go
+++ b/services/keepstore/volume_test.go
@@ -43,18 +43,18 @@ var (
 // underlying Volume, in order to test behavior in cases that are
 // impractical to achieve with a sequence of normal Volume operations.
 type TestableVolume interface {
-	Volume
+	volume
 
 	// [Over]write content for a locator with the given data,
 	// bypassing all constraints like readonly and serialize.
-	PutRaw(locator string, data []byte)
+	BlockWriteRaw(locator string, data []byte)
 
 	// Returns the strings that a driver uses to record read/write operations.
 	ReadWriteOperationLabelValues() (r, w string)
 
 	// Specify the value Mtime() should return, until the next
-	// call to Touch, TouchWithDate, or Put.
-	TouchWithDate(locator string, lastPut time.Time)
+	// call to Touch, TouchWithDate, or BlockWrite.
+	TouchWithDate(locator string, lastBlockWrite time.Time)
 
 	// Clean up, delete temporary files.
 	Teardown()
@@ -74,7 +74,7 @@ type MockVolume struct {
 	BadVolumeError error
 
 	// Touchable volumes' Touch() method succeeds for a locator
-	// that has been Put().
+	// that has been BlockWrite().
 	Touchable bool
 
 	// Gate is a "starting gate", allowing test cases to pause
@@ -96,7 +96,7 @@ type MockVolume struct {
 
 // newMockVolume returns a non-Bad, non-Readonly, Touchable mock
 // volume.
-func newMockVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
+func newMockVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (volume, error) {
 	gate := make(chan struct{})
 	close(gate)
 	return &MockVolume{
@@ -164,8 +164,8 @@ func (v *MockVolume) Get(ctx context.Context, loc string, buf []byte) (int, erro
 	return 0, os.ErrNotExist
 }
 
-func (v *MockVolume) Put(ctx context.Context, loc string, block []byte) error {
-	v.gotCall("Put")
+func (v *MockVolume) BlockWrite(ctx context.Context, loc string, block []byte) error {
+	v.gotCall("BlockWrite")
 	<-v.Gate
 	if v.Bad {
 		return v.BadVolumeError

-----------------------------------------------------------------------


hooks/post-receive
-- 



