[ARVADOS] updated: b1e2f45d0a926617c991410feda842a5056ff5f0

Git user git at public.curoverse.com
Mon Nov 21 11:53:29 EST 2016


Summary of changes:
 doc/_includes/_crunch1only_begin.liquid                            | 2 ++
 doc/_includes/_crunch1only_end.liquid                              | 1 +
 doc/_includes/_notebox_begin_warning.liquid                        | 2 ++
 doc/_includes/_pipeline_deprecation_notice.liquid                  | 2 +-
 doc/install/install-keep-web.html.textile.liquid                   | 2 +-
 doc/install/install-keepproxy.html.textile.liquid                  | 2 +-
 doc/user/topics/arv-run.html.textile.liquid                        | 4 ++++
 doc/user/topics/running-pipeline-command-line.html.textile.liquid  | 4 ++++
 doc/user/tutorials/tutorial-pipeline-workbench.html.textile.liquid | 4 ++++
 sdk/cli/bin/crunch-job                                             | 6 +++---
 10 files changed, 23 insertions(+), 6 deletions(-)
 create mode 100644 doc/_includes/_crunch1only_begin.liquid
 create mode 100644 doc/_includes/_crunch1only_end.liquid
 create mode 100644 doc/_includes/_notebox_begin_warning.liquid

  discards  36b87614def039450d9e081bd0398f844b2e2282 (commit)
  discards  ff7cdb99a70935ed237234c7fc9026ed9b83b895 (commit)
  discards  a323f88be2e4112e98c337d92aef8d137c3865a1 (commit)
  discards  068da0eea34bf88568ac9eb729a1d83163b7b25a (commit)
  discards  b8ed24a53f163c19b58e6e4d671a3dd7cbe8a088 (commit)
  discards  d46753814e6954eeca07e3c40ea9d82b4f3d95e7 (commit)
  discards  1c49eca432bfc369685277171351819b2faec423 (commit)
  discards  72bacc1bf23f97bd0b4676949bbcdc443f9117d5 (commit)
  discards  33c69bd15c1920db93f874ef7c07e46f7e0cf92b (commit)
       via  b1e2f45d0a926617c991410feda842a5056ff5f0 (commit)
       via  bf08ad601e8c69e812dcfd5fd88cd711d35647ae (commit)
       via  8c716ec575b5f7679a2ab95ebea944a46ff756c1 (commit)
       via  d542de20d8617f5823ab8f675c114f78aaf4a924 (commit)
       via  515a58c0ef8634fca2397a8609f868524a42132c (commit)
       via  4568673894b4a752503ad403bd391767ac1805e5 (commit)
       via  b45d7c92a23390c8be246219a1c84b8736854581 (commit)
       via  78889e115e6fffd5eb82e54a541bd4858f804f91 (commit)
       via  c1ebef70f3b66080b51ef700383f44d70736f495 (commit)
       via  5977b70a38e7102a6a369074897af990944c8934 (commit)
       via  1071e1163f894c2a73df76cd400d102748e5281d (commit)
       via  329b27f1407d900f8de7872077e6c91ebb32107c (commit)
       via  42f433ac4486c18fa6408d5f942dc394e5ff149e (commit)

This update added new revisions after undoing existing revisions.  That is
to say, the old revision is not a strict subset of the new revision.  This
situation occurs when you --force push a change and generate a repository
containing something like this:

 * -- * -- B -- O -- O -- O (36b87614def039450d9e081bd0398f844b2e2282)
            \
             N -- N -- N (b1e2f45d0a926617c991410feda842a5056ff5f0)

When this happens we assume that you've already had alert emails for all
of the O revisions, and so we here report only the revisions in the N
branch from the common base, B.

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.


commit b1e2f45d0a926617c991410feda842a5056ff5f0
Author: Tom Clegg <tom at curoverse.com>
Date:   Thu Nov 17 17:24:05 2016 -0500

    10484: Remove unused volumeStats.

diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 0fdf15c..27ac0d9 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -149,8 +149,7 @@ type S3Volume struct {
 	ReadOnly           bool
 	UnsafeDelete       bool
 
-	bucket      *s3bucket
-	volumeStats ioStats
+	bucket *s3bucket
 
 	startOnce sync.Once
 }

commit bf08ad601e8c69e812dcfd5fd88cd711d35647ae
Author: Tom Clegg <tom at curoverse.com>
Date:   Wed Nov 16 18:10:32 2016 -0500

    10484: Test s3 bucket stats.

diff --git a/services/keepstore/s3_volume_test.go b/services/keepstore/s3_volume_test.go
index 63b1862..10e9158 100644
--- a/services/keepstore/s3_volume_test.go
+++ b/services/keepstore/s3_volume_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"log"
@@ -82,6 +83,35 @@ func (s *StubbedS3Suite) TestIndex(c *check.C) {
 	}
 }
 
+func (s *StubbedS3Suite) TestStats(c *check.C) {
+	v := s.newTestableVolume(c, 5*time.Minute, false, 2)
+	stats := func() string {
+		buf, err := json.Marshal(v.InternalStats())
+		c.Check(err, check.IsNil)
+		return string(buf)
+	}
+
+	c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
+
+	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+	_, err := v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.NotNil)
+	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
+	c.Check(stats(), check.Matches, `.*"\*s3.Error 404 [^"]*":[^0].*`)
+	c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
+
+	err = v.Put(context.Background(), loc, []byte("foo"))
+	c.Check(err, check.IsNil)
+	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
+	c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
+
+	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.IsNil)
+	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.IsNil)
+	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
+}
+
 func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 	defer func(tl, bs arvados.Duration) {
 		theConfig.TrashLifetime = tl

commit 8c716ec575b5f7679a2ab95ebea944a46ff756c1
Author: Tom Clegg <tom at curoverse.com>
Date:   Wed Nov 16 17:51:24 2016 -0500

    10484: Tidy up stats-tracking code into a bucket proxy type.

diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 3b843e0..0fdf15c 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -149,28 +149,12 @@ type S3Volume struct {
 	ReadOnly           bool
 	UnsafeDelete       bool
 
-	bucket      *s3.Bucket
-	bucketStats bucketStats
+	bucket      *s3bucket
 	volumeStats ioStats
 
 	startOnce sync.Once
 }
 
-type bucketStats struct {
-	Errors   uint64
-	Ops      uint64
-	GetOps   uint64
-	PutOps   uint64
-	HeadOps  uint64
-	DelOps   uint64
-	InBytes  uint64
-	OutBytes uint64
-
-	ErrorCodes map[string]uint64 `json:",omitempty"`
-
-	lock sync.Mutex
-}
-
 // Examples implements VolumeWithExamples.
 func (*S3Volume) Examples() []Volume {
 	return []Volume{
@@ -248,9 +232,11 @@ func (v *S3Volume) Start() error {
 	client := s3.New(auth, region)
 	client.ConnectTimeout = time.Duration(v.ConnectTimeout)
 	client.ReadTimeout = time.Duration(v.ReadTimeout)
-	v.bucket = &s3.Bucket{
-		S3:   client,
-		Name: v.Bucket,
+	v.bucket = &s3bucket{
+		Bucket: &s3.Bucket{
+			S3:   client,
+			Name: v.Bucket,
+		},
 	}
 	return nil
 }
@@ -282,19 +268,14 @@ func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io
 // disappeared in a Trash race, getReader calls fixRace to recover the
 // data, and tries again.
 func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	err = v.translateError(err)
-	if err == nil {
-		rdr = NewCountingReader(rdr, v.tickInBytes)
-		return
-	} else if !os.IsNotExist(v.tickErr(err)) {
+	if err == nil || !os.IsNotExist(err) {
 		return
 	}
 
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err = v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if err != nil {
 		// If we can't read recent/X, there's no point in
 		// trying fixRace. Give up.
@@ -305,13 +286,11 @@ func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
 		return
 	}
 
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	if err != nil {
 		log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
-		err = v.translateError(v.tickErr(err))
+		err = v.translateError(err)
 	}
-	rdr = NewCountingReader(rdr, v.tickInBytes)
 	return
 }
 
@@ -396,16 +375,11 @@ func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
 			}
 		}()
 		defer close(ready)
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
-		rdr := NewCountingReader(bufr, v.tickOutBytes)
-		err = v.bucket.PutReader(loc, rdr, int64(size), "application/octet-stream", s3ACL, opts)
+		err = v.bucket.PutReader(loc, bufr, int64(size), "application/octet-stream", s3ACL, opts)
 		if err != nil {
-			v.tickErr(err)
 			return
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-		v.tickErr(err)
 	}()
 	select {
 	case <-ctx.Done():
@@ -429,44 +403,38 @@ func (v *S3Volume) Touch(loc string) error {
 	if v.ReadOnly {
 		return MethodDisabledError
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if os.IsNotExist(err) && v.fixRace(loc) {
 		// The data object got trashed in a race, but fixRace
 		// rescued it.
 	} else if err != nil {
 		return err
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(v.tickErr(err))
+	return v.translateError(err)
 }
 
 // Mtime returns the stored timestamp for the given locator.
 func (v *S3Volume) Mtime(loc string) (time.Time, error) {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
 	if err != nil {
-		return zeroTime, v.translateError(v.tickErr(err))
+		return zeroTime, v.translateError(err)
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if os.IsNotExist(err) {
 		// The data object X exists, but recent/X is missing.
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
 		if err != nil {
 			log.Printf("error: creating %q: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(v.tickErr(err))
+			return zeroTime, v.translateError(err)
 		}
 		log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		resp, err = v.bucket.Head("recent/"+loc, nil)
 		if err != nil {
 			log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(v.tickErr(err))
+			return zeroTime, v.translateError(err)
 		}
 	} else if err != nil {
 		// HEAD recent/X failed for some other reason.
@@ -480,16 +448,19 @@ func (v *S3Volume) Mtime(loc string) (time.Time, error) {
 func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
 	// Use a merge sort to find matching sets of X and recent/X.
 	dataL := s3Lister{
-		Bucket:   v.bucket,
+		Bucket:   v.bucket.Bucket,
 		Prefix:   prefix,
 		PageSize: v.IndexPageSize,
 	}
 	recentL := s3Lister{
-		Bucket:   v.bucket,
+		Bucket:   v.bucket.Bucket,
 		Prefix:   "recent/" + prefix,
 		PageSize: v.IndexPageSize,
 	}
+	v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
+	v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 	for data, recent := dataL.First(), recentL.First(); data != nil; data = dataL.Next() {
+		v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 		if data.Key >= "g" {
 			// Conveniently, "recent/*" and "trash/*" are
 			// lexically greater than all hex-encoded data
@@ -511,10 +482,12 @@ func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
 		for recent != nil {
 			if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
 				recent = recentL.Next()
+				v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 				continue
 			} else if cmp == 0 {
 				stamp = recent
 				recent = recentL.Next()
+				v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 				break
 			} else {
 				// recent/X marker is missing: we'll
@@ -546,8 +519,7 @@ func (v *S3Volume) Trash(loc string) error {
 		if !s3UnsafeDelete {
 			return ErrS3TrashDisabled
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
-		return v.translateError(v.tickErr(v.bucket.Del(loc)))
+		return v.translateError(v.bucket.Del(loc))
 	}
 	err := v.checkRaceWindow(loc)
 	if err != nil {
@@ -557,16 +529,14 @@ func (v *S3Volume) Trash(loc string) error {
 	if err != nil {
 		return err
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
-	return v.translateError(v.tickErr(v.bucket.Del(loc)))
+	return v.translateError(v.bucket.Del(loc))
 }
 
 // checkRaceWindow returns a non-nil error if trash/loc is, or might
 // be, in the race window (i.e., it's not safe to trash loc).
 func (v *S3Volume) checkRaceWindow(loc string) error {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("trash/"+loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if os.IsNotExist(err) {
 		// OK, trash/X doesn't exist so we're not in the race
 		// window
@@ -599,12 +569,11 @@ func (v *S3Volume) checkRaceWindow(loc string) error {
 // (PutCopy returns 200 OK if the request was received, even if the
 // copy failed).
 func (v *S3Volume) safeCopy(dst, src string) error {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
 		ContentType:       "application/octet-stream",
 		MetadataDirective: "REPLACE",
 	}, v.bucket.Name+"/"+src)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if err != nil {
 		return err
 	}
@@ -638,9 +607,8 @@ func (v *S3Volume) Untrash(loc string) error {
 	if err != nil {
 		return err
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(v.tickErr(err))
+	return v.translateError(err)
 }
 
 // Status returns a *VolumeStatus representing the current in-use
@@ -654,9 +622,9 @@ func (v *S3Volume) Status() *VolumeStatus {
 	}
 }
 
-// IOStatus implements InternalStatser.
+// InternalStats returns bucket I/O and API call counters.
 func (v *S3Volume) InternalStats() interface{} {
-	return &v.bucketStats
+	return &v.bucket.stats
 }
 
 // String implements fmt.Stringer.
@@ -687,10 +655,9 @@ func (v *S3Volume) isKeepBlock(s string) bool {
 // there was a race between Put and Trash, fixRace recovers from the
 // race by Untrashing the block.
 func (v *S3Volume) fixRace(loc string) bool {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	trash, err := v.bucket.Head("trash/"+loc, nil)
 	if err != nil {
-		if !os.IsNotExist(v.translateError(v.tickErr(err))) {
+		if !os.IsNotExist(v.translateError(err)) {
 			log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
 		}
 		return false
@@ -701,10 +668,8 @@ func (v *S3Volume) fixRace(loc string) bool {
 		return false
 	}
 
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	recent, err := v.bucket.Head("recent/"+loc, nil)
 	if err != nil {
-		v.tickErr(err)
 		log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
 		return false
 	}
@@ -753,7 +718,7 @@ func (v *S3Volume) EmptyTrash() {
 
 	// Use a merge sort to find matching sets of trash/X and recent/X.
 	trashL := s3Lister{
-		Bucket:   v.bucket,
+		Bucket:   v.bucket.Bucket,
 		Prefix:   "trash/",
 		PageSize: v.IndexPageSize,
 	}
@@ -772,9 +737,8 @@ func (v *S3Volume) EmptyTrash() {
 			log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
 			continue
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		recent, err := v.bucket.Head("recent/"+loc, nil)
-		if err != nil && os.IsNotExist(v.translateError(v.tickErr(err))) {
+		if err != nil && os.IsNotExist(v.translateError(err)) {
 			log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
 			err = v.Untrash(loc)
 			if err != nil {
@@ -805,9 +769,8 @@ func (v *S3Volume) EmptyTrash() {
 				v.Touch(loc)
 				continue
 			}
-			v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 			_, err := v.bucket.Head(loc, nil)
-			if os.IsNotExist(v.tickErr(err)) {
+			if os.IsNotExist(err) {
 				log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
 				v.fixRace(loc)
 				continue
@@ -819,23 +782,18 @@ func (v *S3Volume) EmptyTrash() {
 		if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
 			continue
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 		err = v.bucket.Del(trash.Key)
 		if err != nil {
-			v.tickErr(err)
 			log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
 			continue
 		}
 		bytesDeleted += trash.Size
 		blocksDeleted++
 
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		_, err = v.bucket.Head(loc, nil)
-		if os.IsNotExist(v.tickErr(err)) {
-			v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+		if os.IsNotExist(err) {
 			err = v.bucket.Del("recent/" + loc)
 			if err != nil {
-				v.tickErr(err)
 				log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
 			}
 		} else if err != nil {
@@ -848,38 +806,6 @@ func (v *S3Volume) EmptyTrash() {
 	log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
-func (v *S3Volume) tick(counters ...*uint64) {
-	for _, counter := range counters {
-		atomic.AddUint64(counter, 1)
-	}
-}
-
-func (v *S3Volume) tickErr(err error) error {
-	if err == nil {
-		return nil
-	}
-	atomic.AddUint64(&v.bucketStats.Errors, 1)
-	errStr := fmt.Sprintf("%T", err)
-	if err, ok := err.(*s3.Error); ok {
-		errStr = errStr + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
-	}
-	v.bucketStats.lock.Lock()
-	if v.bucketStats.ErrorCodes == nil {
-		v.bucketStats.ErrorCodes = make(map[string]uint64)
-	}
-	v.bucketStats.ErrorCodes[errStr]++
-	v.bucketStats.lock.Unlock()
-	return err
-}
-
-func (v *S3Volume) tickInBytes(n uint64) {
-	atomic.AddUint64(&v.bucketStats.InBytes, n)
-}
-
-func (v *S3Volume) tickOutBytes(n uint64) {
-	atomic.AddUint64(&v.bucketStats.OutBytes, n)
-}
-
 type s3Lister struct {
 	Bucket     *s3.Bucket
 	Prefix     string
@@ -938,3 +864,91 @@ func (lister *s3Lister) pop() (k *s3.Key) {
 	}
 	return
 }
+
+// s3bucket wraps s3.Bucket and counts I/O and API usage stats.
+type s3bucket struct {
+	*s3.Bucket
+	stats s3bucketStats
+}
+
+func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
+	rdr, err := b.Bucket.GetReader(path)
+	b.stats.tick(&b.stats.Ops, &b.stats.GetOps)
+	b.stats.tickErr(err)
+	return NewCountingReader(rdr, b.stats.tickInBytes), err
+}
+
+func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
+	resp, err := b.Bucket.Head(path, headers)
+	b.stats.tick(&b.stats.Ops, &b.stats.HeadOps)
+	b.stats.tickErr(err)
+	return resp, err
+}
+
+func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
+	err := b.Bucket.PutReader(path, NewCountingReader(r, b.stats.tickOutBytes), length, contType, perm, options)
+	b.stats.tick(&b.stats.Ops, &b.stats.PutOps)
+	b.stats.tickErr(err)
+	return err
+}
+
+func (b *s3bucket) Put(path string, data []byte, contType string, perm s3.ACL, options s3.Options) error {
+	err := b.Bucket.PutReader(path, NewCountingReader(bytes.NewBuffer(data), b.stats.tickOutBytes), int64(len(data)), contType, perm, options)
+	b.stats.tick(&b.stats.Ops, &b.stats.PutOps)
+	b.stats.tickErr(err)
+	return err
+}
+
+func (b *s3bucket) Del(path string) error {
+	err := b.Bucket.Del(path)
+	b.stats.tick(&b.stats.Ops, &b.stats.DelOps)
+	b.stats.tickErr(err)
+	return err
+}
+
+type s3bucketStats struct {
+	Errors   uint64
+	Ops      uint64
+	GetOps   uint64
+	PutOps   uint64
+	HeadOps  uint64
+	DelOps   uint64
+	ListOps  uint64
+	InBytes  uint64
+	OutBytes uint64
+
+	ErrorCodes map[string]uint64 `json:",omitempty"`
+
+	lock sync.Mutex
+}
+
+func (s *s3bucketStats) tickInBytes(n uint64) {
+	atomic.AddUint64(&s.InBytes, n)
+}
+
+func (s *s3bucketStats) tickOutBytes(n uint64) {
+	atomic.AddUint64(&s.OutBytes, n)
+}
+
+func (s *s3bucketStats) tick(counters ...*uint64) {
+	for _, counter := range counters {
+		atomic.AddUint64(counter, 1)
+	}
+}
+
+func (s *s3bucketStats) tickErr(err error) {
+	if err == nil {
+		return
+	}
+	atomic.AddUint64(&s.Errors, 1)
+	errStr := fmt.Sprintf("%T", err)
+	if err, ok := err.(*s3.Error); ok {
+		errStr = errStr + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
+	}
+	s.lock.Lock()
+	if s.ErrorCodes == nil {
+		s.ErrorCodes = make(map[string]uint64)
+	}
+	s.ErrorCodes[errStr]++
+	s.lock.Unlock()
+}

commit d542de20d8617f5823ab8f675c114f78aaf4a924
Author: Tom Clegg <tom at curoverse.com>
Date:   Thu Nov 10 15:25:32 2016 -0500

    10484: Track non-s3 errors by Go type.

diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 48ba95b..3b843e0 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -859,15 +859,16 @@ func (v *S3Volume) tickErr(err error) error {
 		return nil
 	}
 	atomic.AddUint64(&v.bucketStats.Errors, 1)
+	errStr := fmt.Sprintf("%T", err)
 	if err, ok := err.(*s3.Error); ok {
-		errStr := fmt.Sprintf("%d %s", err.StatusCode, err.Code)
-		v.bucketStats.lock.Lock()
-		if v.bucketStats.ErrorCodes == nil {
-			v.bucketStats.ErrorCodes = make(map[string]uint64)
-		}
-		v.bucketStats.ErrorCodes[errStr]++
-		v.bucketStats.lock.Unlock()
+		errStr = errStr + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
+	}
+	v.bucketStats.lock.Lock()
+	if v.bucketStats.ErrorCodes == nil {
+		v.bucketStats.ErrorCodes = make(map[string]uint64)
 	}
+	v.bucketStats.ErrorCodes[errStr]++
+	v.bucketStats.lock.Unlock()
 	return err
 }
 

commit 515a58c0ef8634fca2397a8609f868524a42132c
Author: Tom Clegg <tom at curoverse.com>
Date:   Thu Nov 10 15:21:26 2016 -0500

    10484: Track s3 errors by response code.

diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index c52f616..48ba95b 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -165,6 +165,10 @@ type bucketStats struct {
 	DelOps   uint64
 	InBytes  uint64
 	OutBytes uint64
+
+	ErrorCodes map[string]uint64 `json:",omitempty"`
+
+	lock sync.Mutex
 }
 
 // Examples implements VolumeWithExamples.
@@ -851,8 +855,18 @@ func (v *S3Volume) tick(counters ...*uint64) {
 }
 
 func (v *S3Volume) tickErr(err error) error {
-	if err != nil {
-		atomic.AddUint64(&v.bucketStats.Errors, 1)
+	if err == nil {
+		return nil
+	}
+	atomic.AddUint64(&v.bucketStats.Errors, 1)
+	if err, ok := err.(*s3.Error); ok {
+		errStr := fmt.Sprintf("%d %s", err.StatusCode, err.Code)
+		v.bucketStats.lock.Lock()
+		if v.bucketStats.ErrorCodes == nil {
+			v.bucketStats.ErrorCodes = make(map[string]uint64)
+		}
+		v.bucketStats.ErrorCodes[errStr]++
+		v.bucketStats.lock.Unlock()
 	}
 	return err
 }

commit 4568673894b4a752503ad403bd391767ac1805e5
Author: Tom Clegg <tom at curoverse.com>
Date:   Tue Nov 8 18:15:20 2016 -0500

    10484: Report volume IO stats for S3 volumes.

diff --git a/services/keepstore/count.go b/services/keepstore/count.go
new file mode 100644
index 0000000..a9f7436
--- /dev/null
+++ b/services/keepstore/count.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+	"io"
+)
+
+func NewCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
+	return &countingReadWriter{
+		writer:  w,
+		counter: f,
+	}
+}
+
+func NewCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
+	return &countingReadWriter{
+		reader:  r,
+		counter: f,
+	}
+}
+
+type countingReadWriter struct {
+	reader  io.Reader
+	writer  io.Writer
+	counter func(uint64)
+}
+
+func (crw *countingReadWriter) Read(buf []byte) (int, error) {
+	n, err := crw.reader.Read(buf)
+	crw.counter(uint64(n))
+	return n, err
+}
+
+func (crw *countingReadWriter) Write(buf []byte) (int, error) {
+	n, err := crw.writer.Write(buf)
+	crw.counter(uint64(n))
+	return n, err
+}
+
+func (crw *countingReadWriter) Close() error {
+	if c, ok := crw.writer.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 563d0c0..b51009e 100644
--- a/services/keepstore/handlers.go
+++ b/services/keepstore/handlers.go
@@ -249,9 +249,16 @@ type PoolStatus struct {
 	Len   int    `json:"BuffersInUse"`
 }
 
+type volumeStatusEnt struct {
+	Label         string
+	Status        *VolumeStatus `json:",omitempty"`
+	VolumeStats   *ioStats      `json:",omitempty"`
+	InternalStats interface{}   `json:",omitempty"`
+}
+
 // NodeStatus struct
 type NodeStatus struct {
-	Volumes    []*VolumeStatus `json:"volumes"`
+	Volumes    []*volumeStatusEnt
 	BufferPool PoolStatus
 	PullQueue  WorkQueueStatus
 	TrashQueue WorkQueueStatus
@@ -292,13 +299,20 @@ func StatusHandler(resp http.ResponseWriter, req *http.Request) {
 func readNodeStatus(st *NodeStatus) {
 	vols := KeepVM.AllReadable()
 	if cap(st.Volumes) < len(vols) {
-		st.Volumes = make([]*VolumeStatus, len(vols))
+		st.Volumes = make([]*volumeStatusEnt, len(vols))
 	}
 	st.Volumes = st.Volumes[:0]
 	for _, vol := range vols {
-		if s := vol.Status(); s != nil {
-			st.Volumes = append(st.Volumes, s)
+		var internalStats interface{}
+		if vol, ok := vol.(InternalStatser); ok {
+			internalStats = vol.InternalStats()
 		}
+		st.Volumes = append(st.Volumes, &volumeStatusEnt{
+			Label:         vol.String(),
+			Status:        vol.Status(),
+			InternalStats: internalStats,
+			//VolumeStats: KeepVM.VolumeStats(vol),
+		})
 	}
 	st.BufferPool.Alloc = bufs.Alloc()
 	st.BufferPool.Cap = bufs.Cap()
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 2f5f8d4..4eaaea8 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -114,6 +114,9 @@ func main() {
 	}
 
 	err = theConfig.Start()
+	if err != nil {
+		log.Fatal(err)
+	}
 
 	if pidfile := theConfig.PIDFile; pidfile != "" {
 		f, err := os.OpenFile(pidfile, os.O_RDWR|os.O_CREATE, 0777)
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 17923f8..c52f616 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -15,6 +15,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
@@ -148,11 +149,24 @@ type S3Volume struct {
 	ReadOnly           bool
 	UnsafeDelete       bool
 
-	bucket *s3.Bucket
+	bucket      *s3.Bucket
+	bucketStats bucketStats
+	volumeStats ioStats
 
 	startOnce sync.Once
 }
 
+type bucketStats struct {
+	Errors   uint64
+	Ops      uint64
+	GetOps   uint64
+	PutOps   uint64
+	HeadOps  uint64
+	DelOps   uint64
+	InBytes  uint64
+	OutBytes uint64
+}
+
 // Examples implements VolumeWithExamples.
 func (*S3Volume) Examples() []Volume {
 	return []Volume{
@@ -264,13 +278,19 @@ func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io
 // disappeared in a Trash race, getReader calls fixRace to recover the
 // data, and tries again.
 func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	err = v.translateError(err)
-	if err == nil || !os.IsNotExist(err) {
+	if err == nil {
+		rdr = NewCountingReader(rdr, v.tickInBytes)
+		return
+	} else if !os.IsNotExist(v.tickErr(err)) {
 		return
 	}
+
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err = v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if err != nil {
 		// If we can't read recent/X, there's no point in
 		// trying fixRace. Give up.
@@ -280,11 +300,14 @@ func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
 		err = os.ErrNotExist
 		return
 	}
+
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	if err != nil {
 		log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
-		err = v.translateError(err)
+		err = v.translateError(v.tickErr(err))
 	}
+	rdr = NewCountingReader(rdr, v.tickInBytes)
 	return
 }
 
@@ -369,11 +392,16 @@ func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
 			}
 		}()
 		defer close(ready)
-		err = v.bucket.PutReader(loc, bufr, int64(size), "application/octet-stream", s3ACL, opts)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
+		rdr := NewCountingReader(bufr, v.tickOutBytes)
+		err = v.bucket.PutReader(loc, rdr, int64(size), "application/octet-stream", s3ACL, opts)
 		if err != nil {
+			v.tickErr(err)
 			return
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+		v.tickErr(err)
 	}()
 	select {
 	case <-ctx.Done():
@@ -397,38 +425,44 @@ func (v *S3Volume) Touch(loc string) error {
 	if v.ReadOnly {
 		return MethodDisabledError
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) && v.fixRace(loc) {
 		// The data object got trashed in a race, but fixRace
 		// rescued it.
 	} else if err != nil {
 		return err
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(err)
+	return v.translateError(v.tickErr(err))
 }
 
 // Mtime returns the stored timestamp for the given locator.
 func (v *S3Volume) Mtime(loc string) (time.Time, error) {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
 	if err != nil {
-		return zeroTime, v.translateError(err)
+		return zeroTime, v.translateError(v.tickErr(err))
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) {
 		// The data object X exists, but recent/X is missing.
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
 		if err != nil {
 			log.Printf("error: creating %q: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(err)
+			return zeroTime, v.translateError(v.tickErr(err))
 		}
 		log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		resp, err = v.bucket.Head("recent/"+loc, nil)
 		if err != nil {
 			log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(err)
+			return zeroTime, v.translateError(v.tickErr(err))
 		}
 	} else if err != nil {
 		// HEAD recent/X failed for some other reason.
@@ -508,7 +542,8 @@ func (v *S3Volume) Trash(loc string) error {
 		if !s3UnsafeDelete {
 			return ErrS3TrashDisabled
 		}
-		return v.bucket.Del(loc)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+		return v.translateError(v.tickErr(v.bucket.Del(loc)))
 	}
 	err := v.checkRaceWindow(loc)
 	if err != nil {
@@ -518,14 +553,16 @@ func (v *S3Volume) Trash(loc string) error {
 	if err != nil {
 		return err
 	}
-	return v.translateError(v.bucket.Del(loc))
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+	return v.translateError(v.tickErr(v.bucket.Del(loc)))
 }
 
 // checkRaceWindow returns a non-nil error if trash/loc is, or might
 // be, in the race window (i.e., it's not safe to trash loc).
 func (v *S3Volume) checkRaceWindow(loc string) error {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("trash/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) {
 		// OK, trash/X doesn't exist so we're not in the race
 		// window
@@ -558,11 +595,12 @@ func (v *S3Volume) checkRaceWindow(loc string) error {
 // (PutCopy returns 200 OK if the request was received, even if the
 // copy failed).
 func (v *S3Volume) safeCopy(dst, src string) error {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
 		ContentType:       "application/octet-stream",
 		MetadataDirective: "REPLACE",
 	}, v.bucket.Name+"/"+src)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if err != nil {
 		return err
 	}
@@ -596,8 +634,9 @@ func (v *S3Volume) Untrash(loc string) error {
 	if err != nil {
 		return err
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(err)
+	return v.translateError(v.tickErr(err))
 }
 
 // Status returns a *VolumeStatus representing the current in-use
@@ -611,9 +650,14 @@ func (v *S3Volume) Status() *VolumeStatus {
 	}
 }
 
+// InternalStats implements InternalStatser.
+func (v *S3Volume) InternalStats() interface{} {
+	return &v.bucketStats
+}
+
 // String implements fmt.Stringer.
 func (v *S3Volume) String() string {
-	return fmt.Sprintf("s3-bucket:%+q", v.bucket.Name)
+	return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
 }
 
 // Writable returns false if all future Put, Mtime, and Delete calls
@@ -639,9 +683,10 @@ func (v *S3Volume) isKeepBlock(s string) bool {
 // there was a race between Put and Trash, fixRace recovers from the
 // race by Untrashing the block.
 func (v *S3Volume) fixRace(loc string) bool {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	trash, err := v.bucket.Head("trash/"+loc, nil)
 	if err != nil {
-		if !os.IsNotExist(v.translateError(err)) {
+		if !os.IsNotExist(v.translateError(v.tickErr(err))) {
 			log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
 		}
 		return false
@@ -652,8 +697,10 @@ func (v *S3Volume) fixRace(loc string) bool {
 		return false
 	}
 
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	recent, err := v.bucket.Head("recent/"+loc, nil)
 	if err != nil {
+		v.tickErr(err)
 		log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
 		return false
 	}
@@ -721,8 +768,9 @@ func (v *S3Volume) EmptyTrash() {
 			log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
 			continue
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		recent, err := v.bucket.Head("recent/"+loc, nil)
-		if err != nil && os.IsNotExist(v.translateError(err)) {
+		if err != nil && os.IsNotExist(v.translateError(v.tickErr(err))) {
 			log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
 			err = v.Untrash(loc)
 			if err != nil {
@@ -752,7 +800,10 @@ func (v *S3Volume) EmptyTrash() {
 				v.fixRace(loc)
 				v.Touch(loc)
 				continue
-			} else if _, err := v.bucket.Head(loc, nil); os.IsNotExist(err) {
+			}
+			v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
+			_, err := v.bucket.Head(loc, nil)
+			if os.IsNotExist(v.tickErr(err)) {
 				log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
 				v.fixRace(loc)
 				continue
@@ -764,18 +815,23 @@ func (v *S3Volume) EmptyTrash() {
 		if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
 			continue
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 		err = v.bucket.Del(trash.Key)
 		if err != nil {
+			v.tickErr(err)
 			log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
 			continue
 		}
 		bytesDeleted += trash.Size
 		blocksDeleted++
 
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		_, err = v.bucket.Head(loc, nil)
-		if os.IsNotExist(err) {
+		if os.IsNotExist(v.tickErr(err)) {
+			v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 			err = v.bucket.Del("recent/" + loc)
 			if err != nil {
+				v.tickErr(err)
 				log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
 			}
 		} else if err != nil {
@@ -788,6 +844,27 @@ func (v *S3Volume) EmptyTrash() {
 	log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
+func (v *S3Volume) tick(counters ...*uint64) {
+	for _, counter := range counters {
+		atomic.AddUint64(counter, 1)
+	}
+}
+
+func (v *S3Volume) tickErr(err error) error {
+	if err != nil {
+		atomic.AddUint64(&v.bucketStats.Errors, 1)
+	}
+	return err
+}
+
+func (v *S3Volume) tickInBytes(n uint64) {
+	atomic.AddUint64(&v.bucketStats.InBytes, n)
+}
+
+func (v *S3Volume) tickOutBytes(n uint64) {
+	atomic.AddUint64(&v.bucketStats.OutBytes, n)
+}
+
 type s3Lister struct {
 	Bucket     *s3.Bucket
 	Prefix     string
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index 57e18ab..b72258d 100644
--- a/services/keepstore/volume.go
+++ b/services/keepstore/volume.go
@@ -243,6 +243,10 @@ type VolumeManager interface {
 	// with more free space, etc.
 	NextWritable() Volume
 
+	// VolumeStats returns the ioStats used for tracking stats for
+	// the given Volume.
+	VolumeStats(Volume) *ioStats
+
 	// Close shuts down the volume manager cleanly.
 	Close()
 }
@@ -254,12 +258,16 @@ type RRVolumeManager struct {
 	readables []Volume
 	writables []Volume
 	counter   uint32
+	iostats   map[Volume]*ioStats
 }
 
 // MakeRRVolumeManager initializes RRVolumeManager
 func MakeRRVolumeManager(volumes []Volume) *RRVolumeManager {
-	vm := &RRVolumeManager{}
+	vm := &RRVolumeManager{
+		iostats: make(map[Volume]*ioStats),
+	}
 	for _, v := range volumes {
+		vm.iostats[v] = &ioStats{}
 		vm.readables = append(vm.readables, v)
 		if v.Writable() {
 			vm.writables = append(vm.writables, v)
@@ -287,18 +295,35 @@ func (vm *RRVolumeManager) NextWritable() Volume {
 	return vm.writables[i%uint32(len(vm.writables))]
 }
 
+// VolumeStats returns an ioStats for the given volume.
+func (vm *RRVolumeManager) VolumeStats(v Volume) *ioStats {
+	return vm.iostats[v]
+}
+
 // Close the RRVolumeManager
 func (vm *RRVolumeManager) Close() {
 }
 
-// VolumeStatus provides status information of the volume consisting of:
-//   * mount_point
-//   * device_num (an integer identifying the underlying storage system)
-//   * bytes_free
-//   * bytes_used
+// VolumeStatus describes the current condition of a volume
 type VolumeStatus struct {
-	MountPoint string `json:"mount_point"`
-	DeviceNum  uint64 `json:"device_num"`
-	BytesFree  uint64 `json:"bytes_free"`
-	BytesUsed  uint64 `json:"bytes_used"`
+	MountPoint string
+	DeviceNum  uint64
+	BytesFree  uint64
+	BytesUsed  uint64
+}
+
+// ioStats tracks I/O statistics for a volume or server
+type ioStats struct {
+	Errors     uint64
+	Ops        uint64
+	CompareOps uint64
+	GetOps     uint64
+	PutOps     uint64
+	TouchOps   uint64
+	InBytes    uint64
+	OutBytes   uint64
+}
+
+type InternalStatser interface {
+	InternalStats() interface{}
 }
diff --git a/services/keepstore/volume_unix.go b/services/keepstore/volume_unix.go
index 5239ed3..f9812b0 100644
--- a/services/keepstore/volume_unix.go
+++ b/services/keepstore/volume_unix.go
@@ -322,7 +322,12 @@ func (v *UnixVolume) Status() *VolumeStatus {
 	// uses fs.Blocks - fs.Bfree.
 	free := fs.Bavail * uint64(fs.Bsize)
 	used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
-	return &VolumeStatus{v.Root, devnum, free, used}
+	return &VolumeStatus{
+		MountPoint: v.Root,
+		DeviceNum:  devnum,
+		BytesFree:  free,
+		BytesUsed:  used,
+	}
 }
 
 var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)

commit b45d7c92a23390c8be246219a1c84b8736854581
Author: Tom Clegg <tom at curoverse.com>
Date:   Tue Nov 8 11:04:08 2016 -0500

    10484: Serve MemStats at /debug.json instead of /status.json.

diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 289dce1..563d0c0 100644
--- a/services/keepstore/handlers.go
+++ b/services/keepstore/handlers.go
@@ -46,6 +46,9 @@ func MakeRESTRouter() *mux.Router {
 	// Privileged client only.
 	rest.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
 
+	// Internals/debugging info (runtime.MemStats)
+	rest.HandleFunc(`/debug.json`, DebugHandler).Methods("GET", "HEAD")
+
 	// List volumes: path, device number, bytes used/avail.
 	rest.HandleFunc(`/status.json`, StatusHandler).Methods("GET", "HEAD")
 
@@ -239,18 +242,6 @@ func IndexHandler(resp http.ResponseWriter, req *http.Request) {
 	resp.Write([]byte{'\n'})
 }
 
-// StatusHandler
-//     Responds to /status.json requests with the current node status,
-//     described in a JSON structure.
-//
-//     The data given in a status.json response includes:
-//        volumes - a list of Keep volumes currently in use by this server
-//          each volume is an object with the following fields:
-//            * mount_point
-//            * device_num (an integer identifying the underlying filesystem)
-//            * bytes_free
-//            * bytes_used
-
 // PoolStatus struct
 type PoolStatus struct {
 	Alloc uint64 `json:"BytesAllocated"`
@@ -264,12 +255,24 @@ type NodeStatus struct {
 	BufferPool PoolStatus
 	PullQueue  WorkQueueStatus
 	TrashQueue WorkQueueStatus
-	Memory     runtime.MemStats
 }
 
 var st NodeStatus
 var stLock sync.Mutex
 
+// DebugHandler addresses /debug.json requests.
+func DebugHandler(resp http.ResponseWriter, req *http.Request) {
+	type debugStats struct {
+		MemStats runtime.MemStats
+	}
+	var ds debugStats
+	runtime.ReadMemStats(&ds.MemStats)
+	err := json.NewEncoder(resp).Encode(&ds)
+	if err != nil {
+		http.Error(resp, err.Error(), 500)
+	}
+}
+
 // StatusHandler addresses /status.json requests.
 func StatusHandler(resp http.ResponseWriter, req *http.Request) {
 	stLock.Lock()
@@ -302,7 +305,6 @@ func readNodeStatus(st *NodeStatus) {
 	st.BufferPool.Len = bufs.Len()
 	st.PullQueue = getWorkQueueStatus(pullq)
 	st.TrashQueue = getWorkQueueStatus(trashq)
-	runtime.ReadMemStats(&st.Memory)
 }
 
 // return a WorkQueueStatus for the given queue. If q is nil (which

-----------------------------------------------------------------------


hooks/post-receive
-- 




More information about the arvados-commits mailing list