[ARVADOS] updated: 527604a3886e1766a1c44d8ad4689052a3c96d90

From: Git user <git at public.curoverse.com>
Date: Tue Nov 8 18:15:25 EST 2016


Summary of changes:
 services/keepstore/count.go     | 44 +++++++++++++++++++++++++++++++++++++
 services/keepstore/handlers.go  |  2 +-
 services/keepstore/s3_volume.go | 48 +++++++++++++++++++++--------------------
 3 files changed, 70 insertions(+), 24 deletions(-)
 create mode 100644 services/keepstore/count.go

  discards  943eb0abcf1dbcfedb032456f270e79c568932d1 (commit)
       via  527604a3886e1766a1c44d8ad4689052a3c96d90 (commit)

This update added new revisions after undoing existing revisions.  That is
to say, the old revision is not a strict subset of the new revision.  This
situation occurs when you --force push a change and generate a repository
containing something like this:

 * -- * -- B -- O -- O -- O (943eb0abcf1dbcfedb032456f270e79c568932d1)
            \
             N -- N -- N (527604a3886e1766a1c44d8ad4689052a3c96d90)

When this happens, we assume that you've already received alert emails
for all of the O revisions, so this message reports only the revisions
on the N branch, starting from the common base B.

The revisions listed above that are new to this repository have not
appeared in any other notification email, so they are listed in full
below.


commit 527604a3886e1766a1c44d8ad4689052a3c96d90
Author: Tom Clegg <tom at curoverse.com>
Date:   Tue Nov 8 18:15:20 2016 -0500

    10470: Report volume IO stats for S3 volumes.

diff --git a/services/keepstore/count.go b/services/keepstore/count.go
new file mode 100644
index 0000000..a9f7436
--- /dev/null
+++ b/services/keepstore/count.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+	"io"
+)
+
+func NewCountingWriter(w io.Writer, f func(uint64)) io.WriteCloser {
+	return &countingReadWriter{
+		writer:  w,
+		counter: f,
+	}
+}
+
+func NewCountingReader(r io.Reader, f func(uint64)) io.ReadCloser {
+	return &countingReadWriter{
+		reader:  r,
+		counter: f,
+	}
+}
+
+type countingReadWriter struct {
+	reader  io.Reader
+	writer  io.Writer
+	counter func(uint64)
+}
+
+func (crw *countingReadWriter) Read(buf []byte) (int, error) {
+	n, err := crw.reader.Read(buf)
+	crw.counter(uint64(n))
+	return n, err
+}
+
+func (crw *countingReadWriter) Write(buf []byte) (int, error) {
+	n, err := crw.writer.Write(buf)
+	crw.counter(uint64(n))
+	return n, err
+}
+
+func (crw *countingReadWriter) Close() error {
+	if c, ok := crw.writer.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
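
For illustration, a minimal sketch of how these wrappers might be used.
It assumes the count.go shown above is compiled alongside it in a
scratch package (keepstore itself already defines main); the buffer and
counter names are invented for the example:

package main

import (
	"bytes"
	"io/ioutil"
	"log"
	"sync/atomic"
)

func main() {
	var inBytes uint64
	src := bytes.NewBufferString("hello, keepstore")
	rdr := NewCountingReader(src, func(n uint64) {
		// Invoked after every Read with the number of bytes read.
		atomic.AddUint64(&inBytes, n)
	})
	data, err := ioutil.ReadAll(rdr)
	if err != nil {
		log.Fatal(err)
	}
	rdr.Close()
	log.Printf("read %d bytes: %q", atomic.LoadUint64(&inBytes), data)
}

NewCountingWriter is the mirror image for io.Writer. Note that in the
S3 code below, even uploads are counted on the read side: PutReader
consumes a reader, so Put wraps bufr with NewCountingReader and the
tickOutBytes counter.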
diff --git a/services/keepstore/handlers.go b/services/keepstore/handlers.go
index 563d0c0..b51009e 100644
--- a/services/keepstore/handlers.go
+++ b/services/keepstore/handlers.go
@@ -249,9 +249,16 @@ type PoolStatus struct {
 	Len   int    `json:"BuffersInUse"`
 }
 
+type volumeStatusEnt struct {
+	Label         string
+	Status        *VolumeStatus `json:",omitempty"`
+	VolumeStats   *ioStats      `json:",omitempty"`
+	InternalStats interface{}   `json:",omitempty"`
+}
+
 // NodeStatus struct
 type NodeStatus struct {
-	Volumes    []*VolumeStatus `json:"volumes"`
+	Volumes    []*volumeStatusEnt
 	BufferPool PoolStatus
 	PullQueue  WorkQueueStatus
 	TrashQueue WorkQueueStatus
@@ -292,13 +299,20 @@ func StatusHandler(resp http.ResponseWriter, req *http.Request) {
 func readNodeStatus(st *NodeStatus) {
 	vols := KeepVM.AllReadable()
 	if cap(st.Volumes) < len(vols) {
-		st.Volumes = make([]*VolumeStatus, len(vols))
+		st.Volumes = make([]*volumeStatusEnt, len(vols))
 	}
 	st.Volumes = st.Volumes[:0]
 	for _, vol := range vols {
-		if s := vol.Status(); s != nil {
-			st.Volumes = append(st.Volumes, s)
+		var internalStats interface{}
+		if vol, ok := vol.(InternalStatser); ok {
+			internalStats = vol.InternalStats()
 		}
+		st.Volumes = append(st.Volumes, &volumeStatusEnt{
+			Label:         vol.String(),
+			Status:        vol.Status(),
+			InternalStats: internalStats,
+			//VolumeStats: KeepVM.VolumeStats(vol),
+		})
 	}
 	st.BufferPool.Alloc = bufs.Alloc()
 	st.BufferPool.Cap = bufs.Cap()
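
A self-contained sketch of the effect of those ",omitempty" tags on the
status JSON. The struct is copied from the diff with the pointer types
relaxed to interface{} so it compiles on its own; the label and counter
values are invented:

package main

import (
	"encoding/json"
	"fmt"
)

type volumeStatusEnt struct {
	Label         string
	Status        interface{} `json:",omitempty"`
	VolumeStats   interface{} `json:",omitempty"`
	InternalStats interface{} `json:",omitempty"`
}

func main() {
	ent := volumeStatusEnt{
		Label:         `s3-bucket:"example-bucket"`,
		InternalStats: map[string]uint64{"Ops": 3, "GetOps": 2},
	}
	buf, err := json.MarshalIndent(ent, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))
	// Only Label and InternalStats appear in the output; the nil
	// Status and VolumeStats fields are dropped by ",omitempty".
}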
diff --git a/services/keepstore/keepstore.go b/services/keepstore/keepstore.go
index 3fb86bc..6205042 100644
--- a/services/keepstore/keepstore.go
+++ b/services/keepstore/keepstore.go
@@ -114,6 +114,9 @@ func main() {
 	}
 
 	err = theConfig.Start()
+	if err != nil {
+		log.Fatal(err)
+	}
 
 	if pidfile := theConfig.PIDFile; pidfile != "" {
 		f, err := os.OpenFile(pidfile, os.O_RDWR|os.O_CREATE, 0777)
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 17923f8..c52f616 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -15,6 +15,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"git.curoverse.com/arvados.git/sdk/go/arvados"
@@ -148,11 +149,24 @@ type S3Volume struct {
 	ReadOnly           bool
 	UnsafeDelete       bool
 
-	bucket *s3.Bucket
+	bucket      *s3.Bucket
+	bucketStats bucketStats
+	volumeStats ioStats
 
 	startOnce sync.Once
 }
 
+type bucketStats struct {
+	Errors   uint64
+	Ops      uint64
+	GetOps   uint64
+	PutOps   uint64
+	HeadOps  uint64
+	DelOps   uint64
+	InBytes  uint64
+	OutBytes uint64
+}
+
 // Examples implements VolumeWithExamples.
 func (*S3Volume) Examples() []Volume {
 	return []Volume{
@@ -264,13 +278,19 @@ func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io
 // disappeared in a Trash race, getReader calls fixRace to recover the
 // data, and tries again.
 func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	err = v.translateError(err)
-	if err == nil || !os.IsNotExist(err) {
+	if err == nil {
+		rdr = NewCountingReader(rdr, v.tickInBytes)
+		return
+	} else if !os.IsNotExist(v.tickErr(err)) {
 		return
 	}
+
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err = v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if err != nil {
 		// If we can't read recent/X, there's no point in
 		// trying fixRace. Give up.
@@ -280,11 +300,14 @@ func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
 		err = os.ErrNotExist
 		return
 	}
+
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	if err != nil {
 		log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
-		err = v.translateError(err)
+		err = v.translateError(v.tickErr(err))
 	}
+	rdr = NewCountingReader(rdr, v.tickInBytes)
 	return
 }
 
@@ -369,11 +392,16 @@ func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
 			}
 		}()
 		defer close(ready)
-		err = v.bucket.PutReader(loc, bufr, int64(size), "application/octet-stream", s3ACL, opts)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
+		rdr := NewCountingReader(bufr, v.tickOutBytes)
+		err = v.bucket.PutReader(loc, rdr, int64(size), "application/octet-stream", s3ACL, opts)
 		if err != nil {
+			v.tickErr(err)
 			return
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
+		v.tickErr(err)
 	}()
 	select {
 	case <-ctx.Done():
@@ -397,38 +425,44 @@ func (v *S3Volume) Touch(loc string) error {
 	if v.ReadOnly {
 		return MethodDisabledError
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) && v.fixRace(loc) {
 		// The data object got trashed in a race, but fixRace
 		// rescued it.
 	} else if err != nil {
 		return err
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(err)
+	return v.translateError(v.tickErr(err))
 }
 
 // Mtime returns the stored timestamp for the given locator.
 func (v *S3Volume) Mtime(loc string) (time.Time, error) {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
 	if err != nil {
-		return zeroTime, v.translateError(err)
+		return zeroTime, v.translateError(v.tickErr(err))
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) {
 		// The data object X exists, but recent/X is missing.
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
 		if err != nil {
 			log.Printf("error: creating %q: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(err)
+			return zeroTime, v.translateError(v.tickErr(err))
 		}
 		log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		resp, err = v.bucket.Head("recent/"+loc, nil)
 		if err != nil {
 			log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(err)
+			return zeroTime, v.translateError(v.tickErr(err))
 		}
 	} else if err != nil {
 		// HEAD recent/X failed for some other reason.
@@ -508,7 +542,8 @@ func (v *S3Volume) Trash(loc string) error {
 		if !s3UnsafeDelete {
 			return ErrS3TrashDisabled
 		}
-		return v.bucket.Del(loc)
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+		return v.translateError(v.tickErr(v.bucket.Del(loc)))
 	}
 	err := v.checkRaceWindow(loc)
 	if err != nil {
@@ -518,14 +553,16 @@ func (v *S3Volume) Trash(loc string) error {
 	if err != nil {
 		return err
 	}
-	return v.translateError(v.bucket.Del(loc))
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+	return v.translateError(v.tickErr(v.bucket.Del(loc)))
 }
 
 // checkRaceWindow returns a non-nil error if trash/loc is, or might
 // be, in the race window (i.e., it's not safe to trash loc).
 func (v *S3Volume) checkRaceWindow(loc string) error {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("trash/"+loc, nil)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if os.IsNotExist(err) {
 		// OK, trash/X doesn't exist so we're not in the race
 		// window
@@ -558,11 +595,12 @@ func (v *S3Volume) checkRaceWindow(loc string) error {
 // (PutCopy returns 200 OK if the request was received, even if the
 // copy failed).
 func (v *S3Volume) safeCopy(dst, src string) error {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
 		ContentType:       "application/octet-stream",
 		MetadataDirective: "REPLACE",
 	}, v.bucket.Name+"/"+src)
-	err = v.translateError(err)
+	err = v.translateError(v.tickErr(err))
 	if err != nil {
 		return err
 	}
@@ -596,8 +634,9 @@ func (v *S3Volume) Untrash(loc string) error {
 	if err != nil {
 		return err
 	}
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(err)
+	return v.translateError(v.tickErr(err))
 }
 
 // Status returns a *VolumeStatus representing the current in-use
@@ -611,9 +650,14 @@ func (v *S3Volume) Status() *VolumeStatus {
 	}
 }
 
+// InternalStats implements InternalStatser.
+func (v *S3Volume) InternalStats() interface{} {
+	return &v.bucketStats
+}
+
 // String implements fmt.Stringer.
 func (v *S3Volume) String() string {
-	return fmt.Sprintf("s3-bucket:%+q", v.bucket.Name)
+	return fmt.Sprintf("s3-bucket:%+q", v.Bucket)
 }
 
 // Writable returns false if all future Put, Mtime, and Delete calls
@@ -639,9 +683,10 @@ func (v *S3Volume) isKeepBlock(s string) bool {
 // there was a race between Put and Trash, fixRace recovers from the
 // race by Untrashing the block.
 func (v *S3Volume) fixRace(loc string) bool {
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	trash, err := v.bucket.Head("trash/"+loc, nil)
 	if err != nil {
-		if !os.IsNotExist(v.translateError(err)) {
+		if !os.IsNotExist(v.translateError(v.tickErr(err))) {
 			log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
 		}
 		return false
@@ -652,8 +697,10 @@ func (v *S3Volume) fixRace(loc string) bool {
 		return false
 	}
 
+	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	recent, err := v.bucket.Head("recent/"+loc, nil)
 	if err != nil {
+		v.tickErr(err)
 		log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
 		return false
 	}
@@ -721,8 +768,9 @@ func (v *S3Volume) EmptyTrash() {
 			log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
 			continue
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		recent, err := v.bucket.Head("recent/"+loc, nil)
-		if err != nil && os.IsNotExist(v.translateError(err)) {
+		if err != nil && os.IsNotExist(v.translateError(v.tickErr(err))) {
 			log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
 			err = v.Untrash(loc)
 			if err != nil {
@@ -752,7 +800,10 @@ func (v *S3Volume) EmptyTrash() {
 				v.fixRace(loc)
 				v.Touch(loc)
 				continue
-			} else if _, err := v.bucket.Head(loc, nil); os.IsNotExist(err) {
+			}
+			v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
+			_, err := v.bucket.Head(loc, nil)
+			if os.IsNotExist(v.tickErr(err)) {
 				log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
 				v.fixRace(loc)
 				continue
@@ -764,18 +815,23 @@ func (v *S3Volume) EmptyTrash() {
 		if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
 			continue
 		}
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 		err = v.bucket.Del(trash.Key)
 		if err != nil {
+			v.tickErr(err)
 			log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
 			continue
 		}
 		bytesDeleted += trash.Size
 		blocksDeleted++
 
+		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		_, err = v.bucket.Head(loc, nil)
-		if os.IsNotExist(err) {
+		if os.IsNotExist(v.tickErr(err)) {
+			v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 			err = v.bucket.Del("recent/" + loc)
 			if err != nil {
+				v.tickErr(err)
 				log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
 			}
 		} else if err != nil {
@@ -788,6 +844,27 @@ func (v *S3Volume) EmptyTrash() {
 	log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
+func (v *S3Volume) tick(counters ...*uint64) {
+	for _, counter := range counters {
+		atomic.AddUint64(counter, 1)
+	}
+}
+
+func (v *S3Volume) tickErr(err error) error {
+	if err != nil {
+		atomic.AddUint64(&v.bucketStats.Errors, 1)
+	}
+	return err
+}
+
+func (v *S3Volume) tickInBytes(n uint64) {
+	atomic.AddUint64(&v.bucketStats.InBytes, n)
+}
+
+func (v *S3Volume) tickOutBytes(n uint64) {
+	atomic.AddUint64(&v.bucketStats.OutBytes, n)
+}
+
 type s3Lister struct {
 	Bucket     *s3.Bucket
 	Prefix     string
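
tick and tickErr are called from concurrent request handlers, so they
bump the shared counters with sync/atomic instead of taking a lock. A
standalone sketch of the same pattern (demoStats and the goroutine
count are invented):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type demoStats struct {
	Ops    uint64
	Errors uint64
}

func main() {
	var s demoStats
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Same idea as S3Volume.tick: one atomic add per
			// counter, no lock held.
			atomic.AddUint64(&s.Ops, 1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadUint64(&s.Ops)) // always prints 100
}

A plain s.Ops++ in those goroutines would be a data race; the atomic
add makes each increment indivisible at negligible cost.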
diff --git a/services/keepstore/volume.go b/services/keepstore/volume.go
index 57e18ab..b72258d 100644
--- a/services/keepstore/volume.go
+++ b/services/keepstore/volume.go
@@ -243,6 +243,10 @@ type VolumeManager interface {
 	// with more free space, etc.
 	NextWritable() Volume
 
+	// VolumeStats returns the ioStats used for tracking stats for
+	// the given Volume.
+	VolumeStats(Volume) *ioStats
+
 	// Close shuts down the volume manager cleanly.
 	Close()
 }
@@ -254,12 +258,16 @@ type RRVolumeManager struct {
 	readables []Volume
 	writables []Volume
 	counter   uint32
+	iostats   map[Volume]*ioStats
 }
 
 // MakeRRVolumeManager initializes RRVolumeManager
 func MakeRRVolumeManager(volumes []Volume) *RRVolumeManager {
-	vm := &RRVolumeManager{}
+	vm := &RRVolumeManager{
+		iostats: make(map[Volume]*ioStats),
+	}
 	for _, v := range volumes {
+		vm.iostats[v] = &ioStats{}
 		vm.readables = append(vm.readables, v)
 		if v.Writable() {
 			vm.writables = append(vm.writables, v)
@@ -287,18 +295,35 @@ func (vm *RRVolumeManager) NextWritable() Volume {
 	return vm.writables[i%uint32(len(vm.writables))]
 }
 
+// VolumeStats returns an ioStats for the given volume.
+func (vm *RRVolumeManager) VolumeStats(v Volume) *ioStats {
+	return vm.iostats[v]
+}
+
 // Close the RRVolumeManager
 func (vm *RRVolumeManager) Close() {
 }
 
-// VolumeStatus provides status information of the volume consisting of:
-//   * mount_point
-//   * device_num (an integer identifying the underlying storage system)
-//   * bytes_free
-//   * bytes_used
+// VolumeStatus describes the current condition of a volume
 type VolumeStatus struct {
-	MountPoint string `json:"mount_point"`
-	DeviceNum  uint64 `json:"device_num"`
-	BytesFree  uint64 `json:"bytes_free"`
-	BytesUsed  uint64 `json:"bytes_used"`
+	MountPoint string
+	DeviceNum  uint64
+	BytesFree  uint64
+	BytesUsed  uint64
+}
+
+// ioStats tracks I/O statistics for a volume or server
+type ioStats struct {
+	Errors     uint64
+	Ops        uint64
+	CompareOps uint64
+	GetOps     uint64
+	PutOps     uint64
+	TouchOps   uint64
+	InBytes    uint64
+	OutBytes   uint64
+}
+
+type InternalStatser interface {
+	InternalStats() interface{}
 }
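
Any volume backend can opt into the new reporting by implementing
InternalStatser; readNodeStatus (in handlers.go above) discovers it
with a type assertion and attaches the result to the volume's status
entry. A hedged sketch of a hypothetical implementation:

// demoVolume is invented for this sketch; it is not part of keepstore.
type demoVolume struct {
	stats struct {
		Ops    uint64
		Errors uint64
	}
}

// InternalStats implements InternalStatser. The returned value is
// marshaled verbatim into the InternalStats field of the status JSON.
func (v *demoVolume) InternalStats() interface{} {
	return &v.stats
}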
diff --git a/services/keepstore/volume_unix.go b/services/keepstore/volume_unix.go
index 5239ed3..f9812b0 100644
--- a/services/keepstore/volume_unix.go
+++ b/services/keepstore/volume_unix.go
@@ -322,7 +322,12 @@ func (v *UnixVolume) Status() *VolumeStatus {
 	// uses fs.Blocks - fs.Bfree.
 	free := fs.Bavail * uint64(fs.Bsize)
 	used := (fs.Blocks - fs.Bfree) * uint64(fs.Bsize)
-	return &VolumeStatus{v.Root, devnum, free, used}
+	return &VolumeStatus{
+		MountPoint: v.Root,
+		DeviceNum:  devnum,
+		BytesFree:  free,
+		BytesUsed:  used,
+	}
 }
 
 var blockDirRe = regexp.MustCompile(`^[0-9a-f]+$`)

-----------------------------------------------------------------------


hooks/post-receive