[ARVADOS] updated: 943eb0abcf1dbcfedb032456f270e79c568932d1

Git user git at public.curoverse.com
Tue Nov 8 17:37:47 EST 2016


Summary of changes:
 build/run-tests.sh                                 |   4 +-
 services/keepstore/azure_blob_volume.go            |   9 +-
 services/keepstore/azure_blob_volume_test.go       |  11 +-
 services/keepstore/collision.go                    |  16 +-
 services/keepstore/config.go                       |   9 +
 services/keepstore/config_test.go                  |   9 +
 services/keepstore/handler_test.go                 |  25 +--
 services/keepstore/handlers.go                     | 109 +++++++---
 .../keepstore/handlers_with_generic_volume_test.go |  15 +-
 services/keepstore/keepstore.go                    |   3 +
 services/keepstore/keepstore_test.go               |  49 ++---
 services/keepstore/pull_worker.go                  |   3 +-
 services/keepstore/s3_volume.go                    | 229 +++++++++++++++++----
 services/keepstore/s3_volume_test.go               |   9 +-
 services/keepstore/trash_worker_test.go            |  19 +-
 services/keepstore/volume.go                       |  52 +++--
 services/keepstore/volume_generic_test.go          |  87 ++++----
 services/keepstore/volume_test.go                  |   7 +-
 services/keepstore/volume_unix.go                  |  30 ++-
 services/keepstore/volume_unix_test.go             |  37 ++--
 20 files changed, 510 insertions(+), 222 deletions(-)
 create mode 100644 services/keepstore/config_test.go

  discards  65ea9196610e866c94d4f846c8488a125fa7f820 (commit)
       via  943eb0abcf1dbcfedb032456f270e79c568932d1 (commit)
       via  9bc44b58589f397376b88ce7897ee39c92719b02 (commit)
       via  fd26f5508b755061046727fb652aa4141029e8b9 (commit)
       via  6fb784416db6651b33b921a0684c2f8de84410fc (commit)
       via  358b9e8cb0fb72db4f7c8966de175fbadca9adeb (commit)
       via  9411197dfa8ff4c7d935a395a04b5846c7b52ffd (commit)
       via  49a0efed4d774c060db94b9702760d33a4134a17 (commit)
       via  fecb5eb18b9cf15459de8eba44b6e545962d8cd4 (commit)
       via  fa774e69987932acfcabe81ca44d4d6c4fb596bf (commit)
       via  d3512add65497d1af8b8bbceff2296c803873f95 (commit)
       via  d3c5a48fddf2f07d93667f9fa8ca2456f1d8f63c (commit)
       via  4f42bd3f3b2c0526690c3368c9172ed89773e6f1 (commit)
       via  f3b231c69407299133a6eb5ff6066ae6136608e9 (commit)
       via  97b8ba6c2d2023f66cab62b7062cd0dbff837c67 (commit)
       via  ec27d93c1d8918ec509ec3c64ed11dcd51f28374 (commit)
       via  da13bb400f87fdd4157146e2d0b171b730fa3208 (commit)
       via  8040d45d59041859350c56cae195eb09a65a8dde (commit)
       via  9b1a9a3a7de01dc07270b950101d11ae96786de4 (commit)
       via  24c98a345046c650247e6515eeb6d3389e54b68c (commit)
       via  863570108a2c901a8eff22dc8a9bc72635ba7b95 (commit)

This update added new revisions after undoing existing revisions.  That is
to say, the old revision is not a strict subset of the new revision.  This
situation occurs when you --force push a change and generate a repository
containing something like this:

 * -- * -- B -- O -- O -- O (65ea9196610e866c94d4f846c8488a125fa7f820)
            \
             N -- N -- N (943eb0abcf1dbcfedb032456f270e79c568932d1)

When this happens we assume that you've already had alert emails for all
of the O revisions, and so we here report only the revisions in the N
branch from the common base, B.

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.


commit 943eb0abcf1dbcfedb032456f270e79c568932d1
Author: Tom Clegg <tom at curoverse.com>
Date:   Tue Nov 8 17:37:36 2016 -0500

    10470: Report volume IO stats for S3 volumes.

diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 563d0c0..af7bc92 100644
--- a/services/keepstore/handlers.go
+++ b/services/keepstore/handlers.go
@@ -249,9 +249,16 @@ type PoolStatus struct {
 	Len   int    `json:"BuffersInUse"`
 }
 
+type volumeStatusEnt struct {
+	Label         string
+	Status        *VolumeStatus `json:",omitempty"`
+	VolumeStats   *ioStats      `json:",omitempty"`
+	InternalStats interface{}   `json:",omitempty"`
+}
+
 // NodeStatus struct
 type NodeStatus struct {
-	Volumes    []*VolumeStatus `json:"volumes"`
+	Volumes    []*volumeStatusEnt
 	BufferPool PoolStatus
 	PullQueue  WorkQueueStatus
 	TrashQueue WorkQueueStatus
@@ -292,13 +299,20 @@ func StatusHandler(resp http.ResponseWriter, req *http.Request) {
 func readNodeStatus(st *NodeStatus) {
 	vols := KeepVM.AllReadable()
 	if cap(st.Volumes) < len(vols) {
-		st.Volumes = make([]*VolumeStatus, len(vols))
+		st.Volumes = make([]*volumeStatusEnt, len(vols))
 	}
 	st.Volumes = st.Volumes[:0]
 	for _, vol := range vols {
-		if s := vol.Status(); s != nil {
-			st.Volumes = append(st.Volumes, s)
+		var internalStats interface{}
+		if vol, ok := vol.(InternalStatser); ok {
+			internalStats = vol.InternalStats()
 		}
+		st.Volumes = append(st.Volumes, &volumeStatusEnt{
+			Label:         vol.String(),
+			Status:        vol.Status(),
+			VolumeStats:   KeepVM.VolumeStats(vol),
+			InternalStats: internalStats,
+		})
 	}
 	st.BufferPool.Alloc = bufs.Alloc()
 	st.BufferPool.Cap = bufs.Cap()
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 3fb86bc..6205042 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -114,6 +114,9 @@ func main() {
 	}
 
 	err = theConfig.Start()
+	if err != nil {
+		log.Fatal(err)
+	}
 
 	if pidfile := theConfig.PIDFile; pidfile != "" {
 		f, err := os.OpenFile(pidfile, os.O_RDWR|os.O_CREATE, 0777)
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 17923f8..d8c6884 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -15,6 +15,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
@@ -148,11 +149,24 @@ type S3Volume struct {
 	ReadOnly           bool
 	UnsafeDelete       bool
 
-	bucket *s3.Bucket
+	bucket      *s3.Bucket
+	bucketStats bucketStats
+	volumeStats ioStats
 
 	startOnce sync.Once
 }
 
+type bucketStats struct {
+	Errors   uint64
+	Ops      uint64
+	GetOps   uint64
+	PutOps   uint64
+	HeadOps  uint64
+	DelOps   uint64
+	InBytes  uint64
+	OutBytes uint64
+}
+
 // Examples implements VolumeWithExamples.
 func (*S3Volume) Examples() []Volume {
 	return []Volume{
@@ -258,19 +272,36 @@ func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io
 	}
 }
 
+func (v *S3Volume) tick(counters ...*uint64) {
+	for _, counter := range counters {
+		atomic.AddUint64(counter, 1)
+	}
+}
+
+func (v *S3Volume) tickErr(err error) error {
+	if err != nil {
+		atomic.AddUint64(&v.bucketStats.Errors, 1)
+	}
+	return err
+}
+
 // getReader wraps (Bucket)GetReader.
 //
 // In situations where (Bucket)GetReader would fail because the block
 // disappeared in a Trash race, getReader calls fixRace to recover the
 // data, and tries again.
 func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	err = v.translateError(err)
 	if err == nil || !os.IsNotExist(err) {
+		v.tickErr(err)
 		return
 	}
+
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err = v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if err != nil {
 		// If we can't read recent/X, there's no point in
 		// trying fixRace. Give up.
@@ -280,11 +311,14 @@ func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
 		err = os.ErrNotExist
 		return
 	}
+
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	if err != nil {
 		log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
-		err = v.translateError(err)
+		err = v.translateError(v.tickErr(err))
 	}
+	rdr = NewCountingReader(rdr, v.tickInBytes)
 	return
 }
 
@@ -335,6 +369,14 @@ func (v *S3Volume) Compare(ctx context.Context, loc string, expect []byte) error
 	return v.translateError(compareReaderWithBuf(ctx, rdr, expect, loc[:32]))
 }
 
+func (v *S3Volume) tickInBytes(n uint64) {
+	atomic.AddUint64(&v.bucketStats.InBytes, n)
+}
+
+func (v *S3Volume) tickOutBytes(n uint64) {
+	atomic.AddUint64(&v.bucketStats.OutBytes, n)
+}
+
 // Put writes a block.
 func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
 	if v.ReadOnly {
@@ -369,11 +411,16 @@ func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
 			}
 		}()
 		defer close(ready)
-		err = v.bucket.PutReader(loc, bufr, int64(size), "application/octet-stream", s3ACL, opts)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
+		rdr := NewCountingReader(bufr, v.tickOutBytes)
+		err = v.bucket.PutReader(loc, rdr, int64(size), "application/octet-stream", s3ACL, opts)
 		if err != nil {
+			v.tickErr(err)
 			return
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+		v.tickErr(err)
 	}()
 	select {
 	case <-ctx.Done():
@@ -397,38 +444,44 @@ func (v *S3Volume) Touch(loc string) error {
 	if v.ReadOnly {
 		return MethodDisabledError
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) && v.fixRace(loc) {
 		// The data object got trashed in a race, but fixRace
 		// rescued it.
 	} else if err != nil {
 		return err
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(err)
+	return v.translateError(v.tickErr(err))
 }
 
 // Mtime returns the stored timestamp for the given locator.
 func (v *S3Volume) Mtime(loc string) (time.Time, error) {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
 	if err != nil {
-		return zeroTime, v.translateError(err)
+		return zeroTime, v.translateError(v.tickErr(err))
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) {
 		// The data object X exists, but recent/X is missing.
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
 		if err != nil {
 			log.Printf("error: creating %q: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(err)
+			return zeroTime, v.translateError(v.tickErr(err))
 		}
 		log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		resp, err = v.bucket.Head("recent/"+loc, nil)
 		if err != nil {
 			log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(err)
+			return zeroTime, v.translateError(v.tickErr(err))
 		}
 	} else if err != nil {
 		// HEAD recent/X failed for some other reason.
@@ -508,7 +561,8 @@ func (v *S3Volume) Trash(loc string) error {
 		if !s3UnsafeDelete {
 			return ErrS3TrashDisabled
 		}
-		return v.bucket.Del(loc)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+		return v.translateError(v.tickErr(v.bucket.Del(loc)))
 	}
 	err := v.checkRaceWindow(loc)
 	if err != nil {
@@ -518,14 +572,16 @@ func (v *S3Volume) Trash(loc string) error {
 	if err != nil {
 		return err
 	}
-	return v.translateError(v.bucket.Del(loc))
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+	return v.translateError(v.tickErr(v.bucket.Del(loc)))
 }
 
 // checkRaceWindow returns a non-nil error if trash/loc is, or might
 // be, in the race window (i.e., it's not safe to trash loc).
 func (v *S3Volume) checkRaceWindow(loc string) error {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("trash/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) {
 		// OK, trash/X doesn't exist so we're not in the race
 		// window
@@ -558,11 +614,12 @@ func (v *S3Volume) checkRaceWindow(loc string) error {
 // (PutCopy returns 200 OK if the request was received, even if the
 // copy failed).
 func (v *S3Volume) safeCopy(dst, src string) error {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
 		ContentType:       "application/octet-stream",
 		MetadataDirective: "REPLACE",
 	}, v.bucket.Name+"/"+src)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if err != nil {
 		return err
 	}
@@ -596,8 +653,9 @@ func (v *S3Volume) Untrash(loc string) error {
 	if err != nil {
 		return err
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(err)
+	return v.translateError(v.tickErr(err))
 }
 
 // Status returns a *VolumeStatus representing the current in-use
@@ -611,9 +669,14 @@ func (v *S3Volume) Status() *VolumeStatus {
 	}
 }
 
+// InternalStats implements InternalStatser.
+func (v *S3Volume) InternalStats() interface{} {
+	return &v.bucketStats
+}
+
 // String implements fmt.Stringer.
 func (v *S3Volume) String() string {
-	return fmt.Sprintf("s3-bucket:%+q", v.bucket.Name)
+	return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
 }
 
 // Writable returns false if all future Put, Mtime, and Delete calls
@@ -639,9 +702,10 @@ func (v *S3Volume) isKeepBlock(s string) bool {
 // there was a race between Put and Trash, fixRace recovers from the
 // race by Untrashing the block.
 func (v *S3Volume) fixRace(loc string) bool {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	trash, err := v.bucket.Head("trash/"+loc, nil)
 	if err != nil {
-		if !os.IsNotExist(v.translateError(err)) {
+		if !os.IsNotExist(v.translateError(v.tickErr(err))) {
 			log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
 		}
 		return false
@@ -652,8 +716,10 @@ func (v *S3Volume) fixRace(loc string) bool {
 		return false
 	}
 
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	recent, err := v.bucket.Head("recent/"+loc, nil)
 	if err != nil {
+		v.tickErr(err)
 		log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
 		return false
 	}
@@ -721,8 +787,9 @@ func (v *S3Volume) EmptyTrash() {
 			log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
 			continue
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		recent, err := v.bucket.Head("recent/"+loc, nil)
-		if err != nil && os.IsNotExist(v.translateError(err)) {
+		if err != nil && os.IsNotExist(v.translateError(v.tickErr(err))) {
 			log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
 			err = v.Untrash(loc)
 			if err != nil {
@@ -752,7 +819,10 @@ func (v *S3Volume) EmptyTrash() {
 				v.fixRace(loc)
 				v.Touch(loc)
 				continue
-			} else if _, err := v.bucket.Head(loc, nil); os.IsNotExist(err) {
+			}
+			v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
+			_, err := v.bucket.Head(loc, nil)
+			if os.IsNotExist(v.tickErr(err)) {
 				log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
 				v.fixRace(loc)
 				continue
@@ -764,18 +834,23 @@ func (v *S3Volume) EmptyTrash() {
 		if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
 			continue
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 		err = v.bucket.Del(trash.Key)
 		if err != nil {
+			v.tickErr(err)
 			log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
 			continue
 		}
 		bytesDeleted += trash.Size
 		blocksDeleted++
 
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		_, err = v.bucket.Head(loc, nil)
-		if os.IsNotExist(err) {
+		if os.IsNotExist(v.tickErr(err)) {
+			v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 			err = v.bucket.Del("recent/" + loc)
 			if err != nil {
+				v.tickErr(err)
 				log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
 			}
 		} else if err != nil {
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index 57e18ab..b72258d 100644
--- a/services/keepstore/volume.go
+++ b/services/keepstore/volume.go
@@ -243,6 +243,10 @@ type VolumeManager interface {
 	// with more free space, etc.
 	NextWritable() Volume
 
+	// VolumeStats returns the ioStats used for tracking stats for
+	// the given Volume.
+	VolumeStats(Volume) *ioStats
+
 	// Close shuts down the volume manager cleanly.
 	Close()
 }
@@ -254,12 +258,16 @@ type RRVolumeManager struct {
 	readables []Volume
 	writables []Volume
 	counter   uint32
+	iostats   map[Volume]*ioStats
 }
 
 // MakeRRVolumeManager initializes RRVolumeManager
 func MakeRRVolumeManager(volumes []Volume) *RRVolumeManager {
-	vm := &RRVolumeManager{}
+	vm := &RRVolumeManager{
+		iostats: make(map[Volume]*ioStats),
+	}
 	for _, v := range volumes {
+		vm.iostats[v] = &ioStats{}
 		vm.readables = append(vm.readables, v)
 		if v.Writable() {
 			vm.writables = append(vm.writables, v)
@@ -287,18 +295,35 @@ func (vm *RRVolumeManager) NextWritable() Volume {
 	return vm.writables[i%uint32(len(vm.writables))]
 }
 
+// VolumeStats returns an ioStats for the given volume.
+func (vm *RRVolumeManager) VolumeStats(v Volume) *ioStats {
+	return vm.iostats[v]
+}
+
 // Close the RRVolumeManager
 func (vm *RRVolumeManager) Close() {
 }
 
-// VolumeStatus provides status information of the volume consisting of:
-//   * mount_point
-//   * device_num (an integer identifying the underlying storage system)
-//   * bytes_free
-//   * bytes_used
+// VolumeStatus describes the current condition of a volume
 type VolumeStatus struct {
-	MountPoint string `json:"mount_point"`
-	DeviceNum  uint64 `json:"device_num"`
-	BytesFree  uint64 `json:"bytes_free"`
-	BytesUsed  uint64 `json:"bytes_used"`
+	MountPoint string
+	DeviceNum  uint64
+	BytesFree  uint64
+	BytesUsed  uint64
+}
+
+// ioStats tracks I/O statistics for a volume or server
+type ioStats struct {
+	Errors     uint64
+	Ops        uint64
+	CompareOps uint64
+	GetOps     uint64
+	PutOps     uint64
+	TouchOps   uint64
+	InBytes    uint64
+	OutBytes   uint64
+}
+
+type InternalStatser interface {
+	InternalStats() interface{}
 }
diff --git a/services/keepstore/volume_unix.go b/services/keepstore/volume_unix.go
index 5239ed3..f9812b0 100644
--- a/services/keepstore/volume_unix.go
+++ b/services/keepstore/volume_unix.go
@@ -322,7 +322,12 @@ func (v *UnixVolume) Status() *VolumeStatus {
 	// uses fs.Blocks - fs.Bfree.
 	free := fs.Bavail * uint64(fs.Bsize)
 	used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
-	return &VolumeStatus{v.Root, devnum, free, used}
+	return &VolumeStatus{
+		MountPoint: v.Root,
+		DeviceNum:  devnum,
+		BytesFree:  free,
+		BytesUsed:  used,
+	}
 }
 
 var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)

commit 9bc44b58589f397376b88ce7897ee39c92719b02
Author: Tom Clegg <tom at curoverse.com>
Date:   Tue Nov 8 11:04:08 2016 -0500

    10470: Serve MemStats at /debug.json instead of /status.json.

diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 289dce1..563d0c0 100644
--- a/services/keepstore/handlers.go
+++ b/services/keepstore/handlers.go
@@ -46,6 +46,9 @@ func MakeRESTRouter() *mux.Router {
 	// Privileged client only.
 	rest.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, IndexHandler).Methods("GET", "HEAD")
 
+	// Internals/debugging info (runtime.MemStats)
+	rest.HandleFunc(`/debug.json`, DebugHandler).Methods("GET", "HEAD")
+
 	// List volumes: path, device number, bytes used/avail.
 	rest.HandleFunc(`/status.json`, StatusHandler).Methods("GET", "HEAD")
 
@@ -239,18 +242,6 @@ func IndexHandler(resp http.ResponseWriter, req *http.Request) {
 	resp.Write([]byte{'\n'})
 }
 
-// StatusHandler
-//     Responds to /status.json requests with the current node status,
-//     described in a JSON structure.
-//
-//     The data given in a status.json response includes:
-//        volumes - a list of Keep volumes currently in use by this server
-//          each volume is an object with the following fields:
-//            * mount_point
-//            * device_num (an integer identifying the underlying filesystem)
-//            * bytes_free
-//            * bytes_used
-
 // PoolStatus struct
 type PoolStatus struct {
 	Alloc uint64 `json:"BytesAllocated"`
@@ -264,12 +255,24 @@ type NodeStatus struct {
 	BufferPool PoolStatus
 	PullQueue  WorkQueueStatus
 	TrashQueue WorkQueueStatus
-	Memory     runtime.MemStats
 }
 
 var st NodeStatus
 var stLock sync.Mutex
 
+// DebugHandler addresses /debug.json requests.
+func DebugHandler(resp http.ResponseWriter, req *http.Request) {
+	type debugStats struct {
+		MemStats runtime.MemStats
+	}
+	var ds debugStats
+	runtime.ReadMemStats(&ds.MemStats)
+	err := json.NewEncoder(resp).Encode(&ds)
+	if err != nil {
+		http.Error(resp, err.Error(), 500)
+	}
+}
+
 // StatusHandler addresses /status.json requests.
 func StatusHandler(resp http.ResponseWriter, req *http.Request) {
 	stLock.Lock()
@@ -302,7 +305,6 @@ func readNodeStatus(st *NodeStatus) {
 	st.BufferPool.Len = bufs.Len()
 	st.PullQueue = getWorkQueueStatus(pullq)
 	st.TrashQueue = getWorkQueueStatus(trashq)
-	runtime.ReadMemStats(&st.Memory)
 }
 
 // return a WorkQueueStatus for the given queue. If q is nil (which

-----------------------------------------------------------------------


hooks/post-receive
-- 




More information about the arvados-commits mailing list