[ARVADOS] updated: a323f88be2e4112e98c337d92aef8d137c3865a1

Git user git at public.curoverse.com
Wed Nov 16 18:10:41 EST 2016


Summary of changes:
 .gitignore                                         |   3 +
 doc/_config.yml                                    |  83 +++--
 doc/_layouts/default.html.liquid                   |   2 +-
 doc/api/authentication.html.textile.liquid         |  40 ---
 doc/api/crunch-scripts.html.textile.liquid         |   5 +-
 doc/api/execution.html.textile.liquid              |  27 ++
 doc/api/index.html.textile.liquid                  |  44 +--
 doc/api/methods.html.textile.liquid                | 120 +++----
 .../api_client_authorizations.html.textile.liquid  |  54 +++-
 doc/api/methods/api_clients.html.textile.liquid    |  40 ++-
 .../methods/authorized_keys.html.textile.liquid    |  43 ++-
 doc/api/methods/collections.html.textile.liquid    |  55 +++-
 .../methods/container_requests.html.textile.liquid | 100 ++++--
 doc/api/methods/containers.html.textile.liquid     |  76 ++++-
 doc/api/methods/groups.html.textile.liquid         |  47 +--
 doc/api/methods/humans.html.textile.liquid         |  39 ++-
 doc/api/methods/job_tasks.html.textile.liquid      |  55 +++-
 doc/api/methods/jobs.html.textile.liquid           | 101 ++++--
 doc/api/methods/keep_disks.html.textile.liquid     |  52 ++-
 doc/api/methods/keep_services.html.textile.liquid  |  52 +--
 doc/api/methods/links.html.textile.liquid          |  69 ++--
 doc/api/methods/logs.html.textile.liquid           |  49 ++-
 doc/api/methods/nodes.html.textile.liquid          |  50 ++-
 .../methods/pipeline_instances.html.textile.liquid |  44 ++-
 .../methods/pipeline_templates.html.textile.liquid | 182 ++++++++++-
 doc/api/methods/repositories.html.textile.liquid   |  45 ++-
 doc/api/methods/specimens.html.textile.liquid      |  41 ++-
 doc/api/methods/traits.html.textile.liquid         |  40 ++-
 doc/api/methods/users.html.textile.liquid          |  67 ++--
 .../methods/virtual_machines.html.textile.liquid   |  52 +--
 doc/api/methods/workflows.html.textile.liquid      |  44 ++-
 doc/api/permission-model.html.textile.liquid       | 143 +++------
 doc/api/requests.html.textile.liquid               | 349 +++++++++++++++++++++
 doc/api/resources.html.textile.liquid              |  46 ++-
 doc/api/schema/ApiClient.html.textile.liquid       |  24 --
 .../ApiClientAuthorization.html.textile.liquid     |  29 --
 doc/api/schema/AuthorizedKey.html.textile.liquid   |  24 --
 doc/api/schema/Collection.html.textile.liquid      |  39 ---
 doc/api/schema/Container.html.textile.liquid       |  59 ----
 .../schema/ContainerRequest.html.textile.liquid    |  70 -----
 doc/api/schema/Group.html.textile.liquid           |  25 --
 doc/api/schema/Human.html.textile.liquid           |  19 --
 doc/api/schema/Job.html.textile.liquid             |  69 ----
 doc/api/schema/JobTask.html.textile.liquid         |  47 ---
 doc/api/schema/KeepDisk.html.textile.liquid        |  31 --
 doc/api/schema/KeepService.html.textile.liquid     |  24 --
 doc/api/schema/Link.html.textile.liquid            |  83 -----
 doc/api/schema/Log.html.textile.liquid             |  33 --
 doc/api/schema/Node.html.textile.liquid            |  28 --
 .../schema/PipelineInstance.html.textile.liquid    |  26 --
 .../schema/PipelineTemplate.html.textile.liquid    | 161 ----------
 doc/api/schema/Repository.html.textile.liquid      |  25 --
 doc/api/schema/Specimen.html.textile.liquid        |  22 --
 doc/api/schema/Trait.html.textile.liquid           |  22 --
 doc/api/schema/User.html.textile.liquid            |  30 --
 doc/api/schema/VirtualMachine.html.textile.liquid  |  21 --
 doc/api/schema/Workflow.html.textile.liquid        |  23 --
 doc/api/storage.html.textile.liquid                | 170 ++++++++++
 doc/api/tokens.html.textile.liquid                 |  63 ++++
 doc/images/Arvados_Permissions.svg                 |   4 +
 doc/images/Crunch_dispatch.svg                     |   4 +
 doc/images/Keep_manifests.svg                      |   4 +
 doc/images/Keep_reading_writing_block.svg          |   4 +
 doc/images/Keep_rendezvous_hashing.svg             |   4 +
 doc/images/Session_Establishment.svg               |   4 +
 .../install-prerequisites.html.textile.liquid      |   4 +-
 doc/sdk/cli/install.html.textile.liquid            |   1 -
 doc/sdk/go/example.html.textile.liquid             |  76 +++++
 doc/sdk/go/index.html.textile.liquid               |  15 +-
 doc/sdk/index.html.textile.liquid                  |  13 +-
 doc/sdk/java/example.html.textile.liquid           |  78 +++++
 doc/sdk/java/index.html.textile.liquid             |  11 +-
 doc/sdk/perl/example.html.textile.liquid           |  81 +++++
 doc/sdk/perl/index.html.textile.liquid             |  59 +---
 doc/sdk/python/events.html.textile.liquid          |   2 +-
 doc/sdk/python/example.html.textile.liquid         |  51 +++
 doc/sdk/python/sdk-python.html.textile.liquid      |   3 +-
 doc/sdk/ruby/example.html.textile.liquid           |  75 +++++
 doc/sdk/ruby/index.html.textile.liquid             |  74 +----
 doc/user/cwl/cwl-runner.html.textile.liquid        |   1 +
 .../reference/job-pipeline-ref.html.textile.liquid |   2 +-
 .../running-external-program.html.textile.liquid   |   4 +-
 .../tutorial-submit-job.html.textile.liquid        |   4 +-
 sdk/cli/bin/arv-run-pipeline-instance              |   2 +
 sdk/cwl/arvados_cwl/__init__.py                    |  23 +-
 sdk/cwl/arvados_cwl/arvcontainer.py                |   7 +-
 sdk/cwl/arvados_cwl/arvjob.py                      |   6 +-
 sdk/cwl/arvados_cwl/crunch_script.py               |   7 +-
 sdk/cwl/arvados_cwl/runner.py                      |   3 +-
 sdk/cwl/tests/test_container.py                    |  23 +-
 sdk/cwl/tests/test_make_output.py                  |   9 +-
 sdk/cwl/tests/test_submit.py                       |  88 ++++++
 sdk/go/arvados/container.go                        |  38 ++-
 .../arvados/v1/collections_controller.rb           |   2 +-
 services/api/app/models/container.rb               |  11 +-
 services/api/app/models/container_request.rb       |  20 +-
 ...43147_add_scheduling_parameters_to_container.rb |   6 +
 services/api/db/structure.sql                      |  10 +-
 .../arvados/v1/collections_controller_test.rb      |  43 +++
 services/api/test/unit/container_request_test.rb   |  32 ++
 services/arv-git-httpd/main.go                     |   2 +-
 .../crunch-dispatch-slurm/crunch-dispatch-slurm.go |   6 +-
 .../crunch-dispatch-slurm_test.go                  |   2 +-
 services/keep-web/main.go                          |   2 +-
 services/keepproxy/keepproxy.go                    |   2 +-
 services/keepstore/keepstore.go                    |   2 +-
 services/keepstore/s3_volume.go                    | 232 +++++++-------
 services/keepstore/s3_volume_test.go               |  30 ++
 tools/arvbox/lib/arvbox/docker/Dockerfile.base     |  48 ++-
 tools/arvbox/lib/arvbox/docker/common.sh           |   6 +-
 .../lib/arvbox/docker/service/ready/run-service    |   4 +-
 111 files changed, 2669 insertions(+), 2001 deletions(-)
 delete mode 100644 doc/api/authentication.html.textile.liquid
 create mode 100644 doc/api/execution.html.textile.liquid
 create mode 100644 doc/api/requests.html.textile.liquid
 delete mode 100644 doc/api/schema/ApiClient.html.textile.liquid
 delete mode 100644 doc/api/schema/ApiClientAuthorization.html.textile.liquid
 delete mode 100644 doc/api/schema/AuthorizedKey.html.textile.liquid
 delete mode 100644 doc/api/schema/Collection.html.textile.liquid
 delete mode 100644 doc/api/schema/Container.html.textile.liquid
 delete mode 100644 doc/api/schema/ContainerRequest.html.textile.liquid
 delete mode 100644 doc/api/schema/Group.html.textile.liquid
 delete mode 100644 doc/api/schema/Human.html.textile.liquid
 delete mode 100644 doc/api/schema/Job.html.textile.liquid
 delete mode 100644 doc/api/schema/JobTask.html.textile.liquid
 delete mode 100644 doc/api/schema/KeepDisk.html.textile.liquid
 delete mode 100644 doc/api/schema/KeepService.html.textile.liquid
 delete mode 100644 doc/api/schema/Link.html.textile.liquid
 delete mode 100644 doc/api/schema/Log.html.textile.liquid
 delete mode 100644 doc/api/schema/Node.html.textile.liquid
 delete mode 100644 doc/api/schema/PipelineInstance.html.textile.liquid
 delete mode 100644 doc/api/schema/PipelineTemplate.html.textile.liquid
 delete mode 100644 doc/api/schema/Repository.html.textile.liquid
 delete mode 100644 doc/api/schema/Specimen.html.textile.liquid
 delete mode 100644 doc/api/schema/Trait.html.textile.liquid
 delete mode 100644 doc/api/schema/User.html.textile.liquid
 delete mode 100644 doc/api/schema/VirtualMachine.html.textile.liquid
 delete mode 100644 doc/api/schema/Workflow.html.textile.liquid
 create mode 100644 doc/api/storage.html.textile.liquid
 create mode 100644 doc/api/tokens.html.textile.liquid
 create mode 100644 doc/images/Arvados_Permissions.svg
 create mode 100644 doc/images/Crunch_dispatch.svg
 create mode 100644 doc/images/Keep_manifests.svg
 create mode 100644 doc/images/Keep_reading_writing_block.svg
 create mode 100644 doc/images/Keep_rendezvous_hashing.svg
 create mode 100644 doc/images/Session_Establishment.svg
 create mode 100644 doc/sdk/go/example.html.textile.liquid
 create mode 100644 doc/sdk/java/example.html.textile.liquid
 create mode 100644 doc/sdk/perl/example.html.textile.liquid
 create mode 100644 doc/sdk/python/example.html.textile.liquid
 create mode 100644 doc/sdk/ruby/example.html.textile.liquid
 create mode 100644 services/api/db/migrate/20161111143147_add_scheduling_parameters_to_container.rb

       via  a323f88be2e4112e98c337d92aef8d137c3865a1 (commit)
       via  068da0eea34bf88568ac9eb729a1d83163b7b25a (commit)
       via  b8ed24a53f163c19b58e6e4d671a3dd7cbe8a088 (commit)
       via  d2f9e7809bd1f63638600c7fc8189182c0f327c0 (commit)
       via  2251688e66191ff1169080f50868bf57e463659c (commit)
       via  c14246b9a21d038fc6fa850f4032659a98397784 (commit)
       via  5d2ef6f7a2a8f93ec411c420287f30af92294520 (commit)
       via  780d334ec4b2d47379d0330ace77b3821c880842 (commit)
       via  eae48c31bb338689ec67fbc6a14a2e0b1fb5e3b6 (commit)
       via  1d656f4f1ec1f890a7677e748bea43a08cfa0b6c (commit)
       via  3af6db5dc4e2f08b2ebb49a82109c4325ad7fcc4 (commit)
       via  38fae0458644b89322ddeac125971800b9e452e5 (commit)
       via  02010431f52911a6ff908e673c534291beb929ac (commit)
       via  82fa37ac01169178f6a9b1c142926de7b50e8841 (commit)
       via  1129e9428dc1f3a300c4148bf12821eecf511ab3 (commit)
       via  90c48f84391d6b4d6b8ed366d5a42d24bf6c696f (commit)
       via  0b5d04beb288175a285c36a38f255399dfe7d0d7 (commit)
       via  b86543493dffefb1ec245f48550cfa9e0119f4d1 (commit)
       via  8c7a6b5bb3cf27dff61a3b2d83fd4374c7262206 (commit)
       via  9640930c3934344ab64c92a5c86c1f7488a4de42 (commit)
       via  f6a8bf41c9f038ece715ec2744c36160f9c6c591 (commit)
       via  4225d058e0bc4380203fe5959e7e54febc91e83b (commit)
       via  ddeef8adc0cb7cdb55be644b9335ea51919ed513 (commit)
       via  c198274863bb5d72ef34dfc311c62bf50d6bd4f4 (commit)
       via  1e6d7656fbfd1f954571157fc7e7e4f75ea5911e (commit)
       via  aa49b45a4d25cb1e4282e242a2502c8a591f8615 (commit)
       via  79786a56410ef381499fb0bfdc5a18407ab33082 (commit)
       via  364fed6e1d4036719e4c461cfe0bc24e7f52f144 (commit)
       via  5fc627d22b47723289251e1e1d9dc45c0e1bd49e (commit)
       via  26e1c10f963a586e40ea9dcb46a87b0107c97b7c (commit)
       via  7e52fd153f2d16f94061ef1eabfe653d4a83852a (commit)
       via  b8610f34c21f1cf44b938802f37971b06af4361c (commit)
       via  0f2ab548f96e8604a929e0636517f634b7dfb0ad (commit)
       via  e59d21d3f47cbee83a6dc389584bd7b17bec270c (commit)
       via  051efbd3d843baa690b334e57fd09fad6a908bb9 (commit)
       via  3c2fee34ad8f668f5cf9001d6b7d605965ec28bb (commit)
       via  c10e9e5f3398d40a3346c7d7c1f84bf50262b8ec (commit)
       via  e0903a2cff2df4e6169e95f7439c0fa361c60ea8 (commit)
       via  94e52d9256cb17dddbc9c383d2ab90e713c25e3b (commit)
       via  42b9e37cd53d63980d3fa4a238f9ff6adad9ccc4 (commit)
       via  becada3b24006cf39417335794cb46556d7aa605 (commit)
      from  d46753814e6954eeca07e3c40ea9d82b4f3d95e7 (commit)

Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.


commit a323f88be2e4112e98c337d92aef8d137c3865a1
Author: Tom Clegg <tom at curoverse.com>
Date:   Wed Nov 16 18:10:32 2016 -0500

    10484: Test s3 bucket stats.

diff --git a/services/keepstore/s3_volume_test.go b/services/keepstore/s3_volume_test.go
index 63b1862..10e9158 100644
--- a/services/keepstore/s3_volume_test.go
+++ b/services/keepstore/s3_volume_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"log"
@@ -82,6 +83,35 @@ func (s *StubbedS3Suite) TestIndex(c *check.C) {
 	}
 }
 
+func (s *StubbedS3Suite) TestStats(c *check.C) {
+	v := s.newTestableVolume(c, 5*time.Minute, false, 2)
+	stats := func() string {
+		buf, err := json.Marshal(v.InternalStats())
+		c.Check(err, check.IsNil)
+		return string(buf)
+	}
+
+	c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
+
+	loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+	_, err := v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.NotNil)
+	c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
+	c.Check(stats(), check.Matches, `.*"\*s3.Error 404 [^"]*":[^0].*`)
+	c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
+
+	err = v.Put(context.Background(), loc, []byte("foo"))
+	c.Check(err, check.IsNil)
+	c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
+	c.Check(stats(), check.Matches, `.*"PutOps":2,.*`)
+
+	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.IsNil)
+	_, err = v.Get(context.Background(), loc, make([]byte, 3))
+	c.Check(err, check.IsNil)
+	c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
+}
+
 func (s *StubbedS3Suite) TestBackendStates(c *check.C) {
 	defer func(tl, bs arvados.Duration) {
 		theConfig.TrashLifetime = tl

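Note: the TestStats case above exercises the new counters by marshaling the
value returned by InternalStats() to JSON and matching substrings with
regexps. A minimal, self-contained sketch of that pattern, using a
hypothetical stats struct rather than the keepstore types:

package main

import (
	"encoding/json"
	"fmt"
	"regexp"
	"sync/atomic"
)

// opStats is a hypothetical counter struct, analogous in shape to the
// s3bucketStats type shown in the next diff below (exported uint64
// fields plus an omitempty error-code map).
type opStats struct {
	Ops        uint64
	GetOps     uint64
	InBytes    uint64
	ErrorCodes map[string]uint64 `json:",omitempty"`
}

func main() {
	var s opStats
	// Simulate one successful Get of a 3-byte block.
	atomic.AddUint64(&s.Ops, 1)
	atomic.AddUint64(&s.GetOps, 1)
	atomic.AddUint64(&s.InBytes, 3)

	buf, _ := json.Marshal(&s)
	fmt.Println(string(buf)) // {"Ops":1,"GetOps":1,"InBytes":3}

	// Assert on the serialized counters, in the style of the test above.
	fmt.Println(regexp.MustCompile(`"InBytes":[^0]`).MatchString(string(buf))) // true
}
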
commit 068da0eea34bf88568ac9eb729a1d83163b7b25a
Author: Tom Clegg <tom at curoverse.com>
Date:   Wed Nov 16 17:51:24 2016 -0500

    10484: Tidy up stats-tracking code into a bucket proxy type.

diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 3b843e0..0fdf15c 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -149,28 +149,12 @@ type S3Volume struct {
 	ReadOnly           bool
 	UnsafeDelete       bool
 
-	bucket      *s3.Bucket
-	bucketStats bucketStats
+	bucket      *s3bucket
 	volumeStats ioStats
 
 	startOnce sync.Once
 }
 
-type bucketStats struct {
-	Errors   uint64
-	Ops      uint64
-	GetOps   uint64
-	PutOps   uint64
-	HeadOps  uint64
-	DelOps   uint64
-	InBytes  uint64
-	OutBytes uint64
-
-	ErrorCodes map[string]uint64 `json:",omitempty"`
-
-	lock sync.Mutex
-}
-
 // Examples implements VolumeWithExamples.
 func (*S3Volume) Examples() []Volume {
 	return []Volume{
@@ -248,9 +232,11 @@ func (v *S3Volume) Start() error {
 	client := s3.New(auth, region)
 	client.ConnectTimeout = time.Duration(v.ConnectTimeout)
 	client.ReadTimeout = time.Duration(v.ReadTimeout)
-	v.bucket = &s3.Bucket{
-		S3:   client,
-		Name: v.Bucket,
+	v.bucket = &s3bucket{
+		Bucket: &s3.Bucket{
+			S3:   client,
+			Name: v.Bucket,
+		},
 	}
 	return nil
 }
@@ -282,19 +268,14 @@ func (v *S3Volume) getReaderWithContext(ctx context.Context, loc string) (rdr io
 // disappeared in a Trash race, getReader calls fixRace to recover the
 // data, and tries again.
 func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	err = v.translateError(err)
-	if err == nil {
-		rdr = NewCountingReader(rdr, v.tickInBytes)
-		return
-	} else if !os.IsNotExist(v.tickErr(err)) {
+	if err == nil || !os.IsNotExist(err) {
 		return
 	}
 
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err = v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if err != nil {
 		// If we can't read recent/X, there's no point in
 		// trying fixRace. Give up.
@@ -305,13 +286,11 @@ func (v *S3Volume) getReader(loc string) (rdr io.ReadCloser, err error) {
 		return
 	}
 
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.GetOps)
 	rdr, err = v.bucket.GetReader(loc)
 	if err != nil {
 		log.Printf("warning: reading %s after successful fixRace: %s", loc, err)
-		err = v.translateError(v.tickErr(err))
+		err = v.translateError(err)
 	}
-	rdr = NewCountingReader(rdr, v.tickInBytes)
 	return
 }
 
@@ -396,16 +375,11 @@ func (v *S3Volume) Put(ctx context.Context, loc string, block []byte) error {
 			}
 		}()
 		defer close(ready)
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
-		rdr := NewCountingReader(bufr, v.tickOutBytes)
-		err = v.bucket.PutReader(loc, rdr, int64(size), "application/octet-stream", s3ACL, opts)
+		err = v.bucket.PutReader(loc, bufr, int64(size), "application/octet-stream", s3ACL, opts)
 		if err != nil {
-			v.tickErr(err)
 			return
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-		v.tickErr(err)
 	}()
 	select {
 	case <-ctx.Done():
@@ -429,44 +403,38 @@ func (v *S3Volume) Touch(loc string) error {
 	if v.ReadOnly {
 		return MethodDisabledError
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if os.IsNotExist(err) && v.fixRace(loc) {
 		// The data object got trashed in a race, but fixRace
 		// rescued it.
 	} else if err != nil {
 		return err
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(v.tickErr(err))
+	return v.translateError(err)
 }
 
 // Mtime returns the stored timestamp for the given locator.
 func (v *S3Volume) Mtime(loc string) (time.Time, error) {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	_, err := v.bucket.Head(loc, nil)
 	if err != nil {
-		return zeroTime, v.translateError(v.tickErr(err))
+		return zeroTime, v.translateError(err)
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("recent/"+loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if os.IsNotExist(err) {
 		// The data object X exists, but recent/X is missing.
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 		err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
 		if err != nil {
 			log.Printf("error: creating %q: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(v.tickErr(err))
+			return zeroTime, v.translateError(err)
 		}
 		log.Printf("info: created %q to migrate existing block to new storage scheme", "recent/"+loc)
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		resp, err = v.bucket.Head("recent/"+loc, nil)
 		if err != nil {
 			log.Printf("error: created %q but HEAD failed: %s", "recent/"+loc, err)
-			return zeroTime, v.translateError(v.tickErr(err))
+			return zeroTime, v.translateError(err)
 		}
 	} else if err != nil {
 		// HEAD recent/X failed for some other reason.
@@ -480,16 +448,19 @@ func (v *S3Volume) Mtime(loc string) (time.Time, error) {
 func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
 	// Use a merge sort to find matching sets of X and recent/X.
 	dataL := s3Lister{
-		Bucket:   v.bucket,
+		Bucket:   v.bucket.Bucket,
 		Prefix:   prefix,
 		PageSize: v.IndexPageSize,
 	}
 	recentL := s3Lister{
-		Bucket:   v.bucket,
+		Bucket:   v.bucket.Bucket,
 		Prefix:   "recent/" + prefix,
 		PageSize: v.IndexPageSize,
 	}
+	v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
+	v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 	for data, recent := dataL.First(), recentL.First(); data != nil; data = dataL.Next() {
+		v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 		if data.Key >= "g" {
 			// Conveniently, "recent/*" and "trash/*" are
 			// lexically greater than all hex-encoded data
@@ -511,10 +482,12 @@ func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
 		for recent != nil {
 			if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
 				recent = recentL.Next()
+				v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 				continue
 			} else if cmp == 0 {
 				stamp = recent
 				recent = recentL.Next()
+				v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
 				break
 			} else {
 				// recent/X marker is missing: we'll
@@ -546,8 +519,7 @@ func (v *S3Volume) Trash(loc string) error {
 		if !s3UnsafeDelete {
 			return ErrS3TrashDisabled
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
-		return v.translateError(v.tickErr(v.bucket.Del(loc)))
+		return v.translateError(v.bucket.Del(loc))
 	}
 	err := v.checkRaceWindow(loc)
 	if err != nil {
@@ -557,16 +529,14 @@ func (v *S3Volume) Trash(loc string) error {
 	if err != nil {
 		return err
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
-	return v.translateError(v.tickErr(v.bucket.Del(loc)))
+	return v.translateError(v.bucket.Del(loc))
 }
 
 // checkRaceWindow returns a non-nil error if trash/loc is, or might
 // be, in the race window (i.e., it's not safe to trash loc).
 func (v *S3Volume) checkRaceWindow(loc string) error {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	resp, err := v.bucket.Head("trash/"+loc, nil)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if os.IsNotExist(err) {
 		// OK, trash/X doesn't exist so we're not in the race
 		// window
@@ -599,12 +569,11 @@ func (v *S3Volume) checkRaceWindow(loc string) error {
 // (PutCopy returns 200 OK if the request was received, even if the
 // copy failed).
 func (v *S3Volume) safeCopy(dst, src string) error {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	resp, err := v.bucket.PutCopy(dst, s3ACL, s3.CopyOptions{
 		ContentType:       "application/octet-stream",
 		MetadataDirective: "REPLACE",
 	}, v.bucket.Name+"/"+src)
-	err = v.translateError(v.tickErr(err))
+	err = v.translateError(err)
 	if err != nil {
 		return err
 	}
@@ -638,9 +607,8 @@ func (v *S3Volume) Untrash(loc string) error {
 	if err != nil {
 		return err
 	}
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.PutOps)
 	err = v.bucket.Put("recent/"+loc, nil, "application/octet-stream", s3ACL, s3.Options{})
-	return v.translateError(v.tickErr(err))
+	return v.translateError(err)
 }
 
 // Status returns a *VolumeStatus representing the current in-use
@@ -654,9 +622,9 @@ func (v *S3Volume) Status() *VolumeStatus {
 	}
 }
 
-// IOStatus implements InternalStatser.
+// InternalStats returns bucket I/O and API call counters.
 func (v *S3Volume) InternalStats() interface{} {
-	return &v.bucketStats
+	return &v.bucket.stats
 }
 
 // String implements fmt.Stringer.
@@ -687,10 +655,9 @@ func (v *S3Volume) isKeepBlock(s string) bool {
 // there was a race between Put and Trash, fixRace recovers from the
 // race by Untrashing the block.
 func (v *S3Volume) fixRace(loc string) bool {
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	trash, err := v.bucket.Head("trash/"+loc, nil)
 	if err != nil {
-		if !os.IsNotExist(v.translateError(v.tickErr(err))) {
+		if !os.IsNotExist(v.translateError(err)) {
 			log.Printf("error: fixRace: HEAD %q: %s", "trash/"+loc, err)
 		}
 		return false
@@ -701,10 +668,8 @@ func (v *S3Volume) fixRace(loc string) bool {
 		return false
 	}
 
-	v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 	recent, err := v.bucket.Head("recent/"+loc, nil)
 	if err != nil {
-		v.tickErr(err)
 		log.Printf("error: fixRace: HEAD %q: %s", "recent/"+loc, err)
 		return false
 	}
@@ -753,7 +718,7 @@ func (v *S3Volume) EmptyTrash() {
 
 	// Use a merge sort to find matching sets of trash/X and recent/X.
 	trashL := s3Lister{
-		Bucket:   v.bucket,
+		Bucket:   v.bucket.Bucket,
 		Prefix:   "trash/",
 		PageSize: v.IndexPageSize,
 	}
@@ -772,9 +737,8 @@ func (v *S3Volume) EmptyTrash() {
 			log.Printf("warning: %s: EmptyTrash: %q: parse %q: %s", v, trash.Key, trash.LastModified, err)
 			continue
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		recent, err := v.bucket.Head("recent/"+loc, nil)
-		if err != nil && os.IsNotExist(v.translateError(v.tickErr(err))) {
+		if err != nil && os.IsNotExist(v.translateError(err)) {
 			log.Printf("warning: %s: EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", v, trash.Key, "recent/"+loc, err)
 			err = v.Untrash(loc)
 			if err != nil {
@@ -805,9 +769,8 @@ func (v *S3Volume) EmptyTrash() {
 				v.Touch(loc)
 				continue
 			}
-			v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 			_, err := v.bucket.Head(loc, nil)
-			if os.IsNotExist(v.tickErr(err)) {
+			if os.IsNotExist(err) {
 				log.Printf("notice: %s: EmptyTrash: detected recent race for %q, calling fixRace", v, loc)
 				v.fixRace(loc)
 				continue
@@ -819,23 +782,18 @@ func (v *S3Volume) EmptyTrash() {
 		if startT.Sub(trashT) < theConfig.TrashLifetime.Duration() {
 			continue
 		}
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
 		err = v.bucket.Del(trash.Key)
 		if err != nil {
-			v.tickErr(err)
 			log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, trash.Key, err)
 			continue
 		}
 		bytesDeleted += trash.Size
 		blocksDeleted++
 
-		v.tick(&v.bucketStats.Ops, &v.bucketStats.HeadOps)
 		_, err = v.bucket.Head(loc, nil)
-		if os.IsNotExist(v.tickErr(err)) {
-			v.tick(&v.bucketStats.Ops, &v.bucketStats.DelOps)
+		if os.IsNotExist(err) {
 			err = v.bucket.Del("recent/" + loc)
 			if err != nil {
-				v.tickErr(err)
 				log.Printf("warning: %s: EmptyTrash: deleting %q: %s", v, "recent/"+loc, err)
 			}
 		} else if err != nil {
@@ -848,38 +806,6 @@ func (v *S3Volume) EmptyTrash() {
 	log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
 }
 
-func (v *S3Volume) tick(counters ...*uint64) {
-	for _, counter := range counters {
-		atomic.AddUint64(counter, 1)
-	}
-}
-
-func (v *S3Volume) tickErr(err error) error {
-	if err == nil {
-		return nil
-	}
-	atomic.AddUint64(&v.bucketStats.Errors, 1)
-	errStr := fmt.Sprintf("%T", err)
-	if err, ok := err.(*s3.Error); ok {
-		errStr = errStr + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
-	}
-	v.bucketStats.lock.Lock()
-	if v.bucketStats.ErrorCodes == nil {
-		v.bucketStats.ErrorCodes = make(map[string]uint64)
-	}
-	v.bucketStats.ErrorCodes[errStr]++
-	v.bucketStats.lock.Unlock()
-	return err
-}
-
-func (v *S3Volume) tickInBytes(n uint64) {
-	atomic.AddUint64(&v.bucketStats.InBytes, n)
-}
-
-func (v *S3Volume) tickOutBytes(n uint64) {
-	atomic.AddUint64(&v.bucketStats.OutBytes, n)
-}
-
 type s3Lister struct {
 	Bucket     *s3.Bucket
 	Prefix     string
@@ -938,3 +864,91 @@ func (lister *s3Lister) pop() (k *s3.Key) {
 	}
 	return
 }
+
+// s3bucket wraps s3.Bucket and counts I/O and API usage stats.
+type s3bucket struct {
+	*s3.Bucket
+	stats s3bucketStats
+}
+
+func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
+	rdr, err := b.Bucket.GetReader(path)
+	b.stats.tick(&b.stats.Ops, &b.stats.GetOps)
+	b.stats.tickErr(err)
+	return NewCountingReader(rdr, b.stats.tickInBytes), err
+}
+
+func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
+	resp, err := b.Bucket.Head(path, headers)
+	b.stats.tick(&b.stats.Ops, &b.stats.HeadOps)
+	b.stats.tickErr(err)
+	return resp, err
+}
+
+func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
+	err := b.Bucket.PutReader(path, NewCountingReader(r, b.stats.tickOutBytes), length, contType, perm, options)
+	b.stats.tick(&b.stats.Ops, &b.stats.PutOps)
+	b.stats.tickErr(err)
+	return err
+}
+
+func (b *s3bucket) Put(path string, data []byte, contType string, perm s3.ACL, options s3.Options) error {
+	err := b.Bucket.PutReader(path, NewCountingReader(bytes.NewBuffer(data), b.stats.tickOutBytes), int64(len(data)), contType, perm, options)
+	b.stats.tick(&b.stats.Ops, &b.stats.PutOps)
+	b.stats.tickErr(err)
+	return err
+}
+
+func (b *s3bucket) Del(path string) error {
+	err := b.Bucket.Del(path)
+	b.stats.tick(&b.stats.Ops, &b.stats.DelOps)
+	b.stats.tickErr(err)
+	return err
+}
+
+type s3bucketStats struct {
+	Errors   uint64
+	Ops      uint64
+	GetOps   uint64
+	PutOps   uint64
+	HeadOps  uint64
+	DelOps   uint64
+	ListOps  uint64
+	InBytes  uint64
+	OutBytes uint64
+
+	ErrorCodes map[string]uint64 `json:",omitempty"`
+
+	lock sync.Mutex
+}
+
+func (s *s3bucketStats) tickInBytes(n uint64) {
+	atomic.AddUint64(&s.InBytes, n)
+}
+
+func (s *s3bucketStats) tickOutBytes(n uint64) {
+	atomic.AddUint64(&s.OutBytes, n)
+}
+
+func (s *s3bucketStats) tick(counters ...*uint64) {
+	for _, counter := range counters {
+		atomic.AddUint64(counter, 1)
+	}
+}
+
+func (s *s3bucketStats) tickErr(err error) {
+	if err == nil {
+		return
+	}
+	atomic.AddUint64(&s.Errors, 1)
+	errStr := fmt.Sprintf("%T", err)
+	if err, ok := err.(*s3.Error); ok {
+		errStr = errStr + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
+	}
+	s.lock.Lock()
+	if s.ErrorCodes == nil {
+		s.ErrorCodes = make(map[string]uint64)
+	}
+	s.ErrorCodes[errStr]++
+	s.lock.Unlock()
+}

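Note: the refactor above replaces the per-call-site tick() bookkeeping with a
thin proxy type that embeds *s3.Bucket and does all the counting in one
place. A minimal, self-contained sketch of the same wrap-and-count idea,
using hypothetical stand-in types rather than the keepstore ones (byte
counting is done by a callback-per-Read wrapper, like NewCountingReader in
the diff above):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// countingReader calls tick with the number of bytes returned by each
// Read -- the callback-per-Read idea used for InBytes/OutBytes above.
type countingReader struct {
	io.Reader
	tick func(uint64)
}

func (cr countingReader) Read(p []byte) (int, error) {
	n, err := cr.Reader.Read(p)
	cr.tick(uint64(n))
	return n, err
}

// store is a hypothetical stand-in for *s3.Bucket.
type store struct{}

func (store) GetReader(key string) (io.ReadCloser, error) {
	return io.NopCloser(strings.NewReader("foo")), nil
}

// countingStore embeds store; the override only adds counter bookkeeping.
type countingStore struct {
	store
	ops, getOps, inBytes uint64
}

func (cs *countingStore) GetReader(key string) (io.ReadCloser, error) {
	rdr, err := cs.store.GetReader(key)
	atomic.AddUint64(&cs.ops, 1)
	atomic.AddUint64(&cs.getOps, 1)
	if err != nil {
		return nil, err
	}
	return io.NopCloser(countingReader{rdr, func(n uint64) {
		atomic.AddUint64(&cs.inBytes, n)
	}}), nil
}

func main() {
	cs := &countingStore{}
	rdr, _ := cs.GetReader("acbd18db4cc2f85cedef654fccc4a4d8")
	io.Copy(io.Discard, rdr)
	fmt.Println(cs.ops, cs.getOps, cs.inBytes) // 1 1 3
}

Because the proxy embeds the underlying type, methods it does not override
pass through unchanged, and callers that still need the raw *s3.Bucket (the
s3Lister used by IndexTo and EmptyTrash) can reach it through the embedded
field, as v.bucket.Bucket does in the diff above.
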
commit b8ed24a53f163c19b58e6e4d671a3dd7cbe8a088
Merge: d467538 d2f9e78
Author: Tom Clegg <tom at curoverse.com>
Date:   Wed Nov 16 17:05:56 2016 -0500

    10484: Merge branch 'master' into 10484-keepstore-stats


-----------------------------------------------------------------------


hooks/post-receive
-- 