[ARVADOS] updated: 991a32dd18fb9be2862baca9d6b374d09ae8bc38
From: Git user <git at public.curoverse.com>
Date: Tue Jan 3 13:34:09 EST 2017
Summary of changes:
.../app/controllers/projects_controller.rb | 10 +-
.../test/controllers/projects_controller_test.rb | 9 +-
build/package-build-dockerfiles/Makefile | 6 +-
build/package-build-dockerfiles/centos6/Dockerfile | 37 ------
build/package-test-dockerfiles/centos6/Dockerfile | 22 ----
.../centos6/localrepo.repo | 5 -
build/package-testing/test-packages-centos6.sh | 1 -
build/run-build-packages-all-targets.sh | 2 +-
build/run-build-packages-one-target.sh | 4 -
build/run-build-packages-sso.sh | 2 +-
build/run-build-packages.sh | 53 --------
sdk/cli/bin/crunch-job | 2 +-
sdk/go/arvados/collection.go | 4 +-
sdk/go/arvados/resource_list.go | 11 +-
.../arvados/v1/collections_controller.rb | 30 ++++-
services/api/app/middlewares/rack_socket.rb | 20 ++-
services/api/app/models/arvados_model.rb | 2 +-
services/api/app/models/collection.rb | 98 ++++++++++++---
services/api/config/application.default.yml | 9 +-
services/api/config/routes.rb | 1 +
...61222153434_split_expiry_to_trash_and_delete.rb | 42 +++++++
services/api/db/structure.sql | 45 +++++--
services/api/lib/eventbus.rb | 1 +
services/api/lib/load_param.rb | 3 +-
services/api/lib/sweep_trashed_collections.rb | 34 +++++
services/api/test/fixtures/collections.yml | 41 +++++-
.../arvados/v1/collections_controller_test.rb | 65 ++++++++++
services/api/test/integration/select_test.rb | 7 ++
services/api/test/unit/collection_test.rb | 140 +++++++++++++++++++--
services/arv-web/arv-web.py | 2 +-
services/crunch-run/crunchrun.go | 4 +-
services/keep-balance/balance_run_test.go | 5 +-
services/keep-balance/collection.go | 22 ++--
services/keepstore/azure_blob_volume.go | 50 +++++---
services/keepstore/azure_blob_volume_test.go | 16 ++-
services/keepstore/pipe_adapters.go | 89 +++++++++++++
services/keepstore/s3_volume.go | 28 ++---
services/keepstore/{stats.go => stats_ticker.go} | 18 ++-
services/keepstore/volume.go | 12 ++
services/keepstore/volume_unix.go | 92 +++++++++-----
services/keepstore/volume_unix_test.go | 50 ++++++--
41 files changed, 802 insertions(+), 292 deletions(-)
delete mode 100644 build/package-build-dockerfiles/centos6/Dockerfile
delete mode 100644 build/package-test-dockerfiles/centos6/Dockerfile
delete mode 100644 build/package-test-dockerfiles/centos6/localrepo.repo
delete mode 120000 build/package-testing/test-packages-centos6.sh
create mode 100644 services/api/db/migrate/20161222153434_split_expiry_to_trash_and_delete.rb
create mode 100644 services/api/lib/sweep_trashed_collections.rb
create mode 100644 services/keepstore/pipe_adapters.go
rename services/keepstore/{stats.go => stats_ticker.go} (50%)
discards 5f6f2e8e908829f3b73f423cba82458eb4b221f1 (commit)
via 991a32dd18fb9be2862baca9d6b374d09ae8bc38 (commit)
via 3cbe55d440788b0b9b1a9d9e642103929b57e8fd (commit)
via 3121f0dfb9262ccd50d0637c9f7cedf9191f69bf (commit)
via b2da02379210a1d43914b36f412f76e639203a81 (commit)
via 590e8c5e913576f947a7e28e45166770a8bd619e (commit)
via 2aa6adffd7a89601510b39481a071ba03213f6fd (commit)
via 1767cc94ddd427c6610c82e1b27f6a9f6793b39a (commit)
via d0d60bbbe1c60516334e5d56f3c923b79e611b21 (commit)
via 9048749c1419cf5e130a4b5d992a2b9c5bafd9cf (commit)
via b405f0f487f35f62d8362dc06981b83176b77d44 (commit)
via 4c76ae6b65612579e465a2862f3c2bdfea06b43a (commit)
via 8d0b0fc4046a01a1aeaebb177c3b740237728c25 (commit)
via a12864a31d5569c74ed32157d5fe928a1c2563b7 (commit)
via 29246b83f1b1c66eb12ed53c844ef37ee0b77568 (commit)
via 3f556ca1b44b7e01874bd172abbb7cb3df0615db (commit)
via b4d9dfe1e7acb1f45c2cc699020bf9299a0db5c9 (commit)
via b3e5ea60bdecb41fbf954b67ab859dc4542d0c1a (commit)
via 9c7a9a6e4660b3e7cc6419e5818ffaa3a78b6d19 (commit)
via 7d6cb0405504a536a88aa8467e474f7fd13299a7 (commit)
via d02ba63331267f782e0bc5868ca4aab456e0f2b4 (commit)
via 568c7abf660b7a68f70b6ea47ae2e7352233f053 (commit)
via 6cec281e8653731602ab871cc41ddd21ca8182ab (commit)
via 4ee5a6efb8b01afddddaa8ce5ed7d0de42a287d6 (commit)
via eebb0457f1bb69ec2084cbefcbff8c19e404d556 (commit)
via 0aa01993bc8533d417dc510b1860a4a583e093b4 (commit)
via cdd8dc7bc4cca452e25c5b014e5f2bb592fb31ce (commit)
This update added new revisions after undoing existing revisions. That is
to say, the old revision is not a strict subset of the new revision. This
situation occurs when you --force push a change and generate a repository
containing something like this:
 * -- * -- B -- O -- O -- O (5f6f2e8e908829f3b73f423cba82458eb4b221f1)
            \
             N -- N -- N (991a32dd18fb9be2862baca9d6b374d09ae8bc38)
When this happens we assume that you've already had alert emails for all
of the O revisions, and so we here report only the revisions in the N
branch from the common base, B.
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
commit 991a32dd18fb9be2862baca9d6b374d09ae8bc38
Author: Tom Clegg <tom at curoverse.com>
Date: Tue Jan 3 10:34:27 2017 -0500
10682: Add comments to statsTicker.
diff --git a/services/keepstore/stats.go b/services/keepstore/stats_ticker.go
similarity index 75%
rename from services/keepstore/stats.go
rename to services/keepstore/stats_ticker.go
index e02eb3a..f3a79c6 100644
--- a/services/keepstore/stats.go
+++ b/services/keepstore/stats_ticker.go
@@ -22,6 +22,9 @@ func (s *statsTicker) Tick(counters ...*uint64) {
}
}
+// TickErr increments the overall error counter, as well as the
+// ErrorCodes entry for the given errType. If err is nil, TickErr is a
+// no-op.
func (s *statsTicker) TickErr(err error, errType string) {
if err == nil {
return
@@ -36,10 +39,12 @@ func (s *statsTicker) TickErr(err error, errType string) {
s.lock.Unlock()
}
+// TickInBytes increments the incoming byte counter by n.
func (s *statsTicker) TickInBytes(n uint64) {
atomic.AddUint64(&s.InBytes, n)
}
+// TickOutBytes increments the outgoing byte counter by n.
func (s *statsTicker) TickOutBytes(n uint64) {
atomic.AddUint64(&s.OutBytes, n)
}
commit 3cbe55d440788b0b9b1a9d9e642103929b57e8fd
Author: Tom Clegg <tom at curoverse.com>
Date: Mon Jan 2 14:54:43 2017 -0500
10682: Track Azure backend errors by type.
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index 3837598..220744c 100644
--- a/services/keepstore/azure_blob_volume.go
+++ b/services/keepstore/azure_blob_volume.go
@@ -106,26 +106,6 @@ type AzureBlobVolume struct {
bsClient *azureBlobClient
}
-// azureBlobClient wraps storage.BlobStorageClient in order to count
-// I/O and API usage stats.
-type azureBlobClient struct {
- client *storage.BlobStorageClient
- stats azureBlobStats
-}
-
-type azureBlobStats struct {
- statsTicker
- Ops uint64
- GetOps uint64
- GetRangeOps uint64
- CreateOps uint64
- SetMetadataOps uint64
- DelOps uint64
- ListOps uint64
-
- lock sync.Mutex
-}
-
// Examples implements VolumeWithExamples.
func (*AzureBlobVolume) Examples() []Volume {
return []Volume{
@@ -652,6 +632,36 @@ func (v *AzureBlobVolume) InternalStats() interface{} {
return &v.bsClient.stats
}
+type azureBlobStats struct {
+ statsTicker
+ Ops uint64
+ GetOps uint64
+ GetRangeOps uint64
+ CreateOps uint64
+ SetMetadataOps uint64
+ DelOps uint64
+ ListOps uint64
+}
+
+func (s *azureBlobStats) TickErr(err error) {
+ if err == nil {
+ return
+ }
+ errType := fmt.Sprintf("%T", err)
+ if err, ok := err.(storage.AzureStorageServiceError); ok {
+ errType = errType + fmt.Sprintf(" %d (%s)", err.StatusCode, err.Code)
+ }
+ log.Printf("errType %T, err %s", err, err)
+ s.statsTicker.TickErr(err, errType)
+}
+
+// azureBlobClient wraps storage.BlobStorageClient in order to count
+// I/O and API usage stats.
+type azureBlobClient struct {
+ client *storage.BlobStorageClient
+ stats azureBlobStats
+}
+
func (c *azureBlobClient) ContainerExists(cname string) (bool, error) {
c.stats.Tick(&c.stats.Ops)
ok, err := c.client.ContainerExists(cname)
diff --git a/services/keepstore/azure_blob_volume_test.go b/services/keepstore/azure_blob_volume_test.go
index 7bccafc..4b015a9 100644
--- a/services/keepstore/azure_blob_volume_test.go
+++ b/services/keepstore/azure_blob_volume_test.go
@@ -495,10 +495,10 @@ func TestAzureBlobVolumeRangeFenceposts(t *testing.T) {
}
gotHash := fmt.Sprintf("%x", md5.Sum(gotData))
if gotLen != size {
- t.Error("length mismatch: got %d != %d", gotLen, size)
+ t.Errorf("length mismatch: got %d != %d", gotLen, size)
}
if gotHash != hash {
- t.Error("hash mismatch: got %s != %s", gotHash, hash)
+ t.Errorf("hash mismatch: got %s != %s", gotHash, hash)
}
}
}
@@ -684,6 +684,7 @@ func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
c.Check(err, check.NotNil)
c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
+ c.Check(stats(), check.Matches, `.*"storage\.AzureStorageServiceError 404 \(404 Not Found\)":[^0].*`)
c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
err = s.volume.Put(context.Background(), loc, []byte("foo"))
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index c1d2105..d34b877 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -872,35 +872,35 @@ type s3bucket struct {
func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
rdr, err := b.Bucket.GetReader(path)
b.stats.Tick(&b.stats.Ops, &b.stats.GetOps)
- b.stats.tickErr(err)
+ b.stats.TickErr(err)
return NewCountingReader(rdr, b.stats.TickInBytes), err
}
func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
resp, err := b.Bucket.Head(path, headers)
b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps)
- b.stats.tickErr(err)
+ b.stats.TickErr(err)
return resp, err
}
func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
err := b.Bucket.PutReader(path, NewCountingReader(r, b.stats.TickOutBytes), length, contType, perm, options)
b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
- b.stats.tickErr(err)
+ b.stats.TickErr(err)
return err
}
func (b *s3bucket) Put(path string, data []byte, contType string, perm s3.ACL, options s3.Options) error {
err := b.Bucket.PutReader(path, NewCountingReader(bytes.NewBuffer(data), b.stats.TickOutBytes), int64(len(data)), contType, perm, options)
b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
- b.stats.tickErr(err)
+ b.stats.TickErr(err)
return err
}
func (b *s3bucket) Del(path string) error {
err := b.Bucket.Del(path)
b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
- b.stats.tickErr(err)
+ b.stats.TickErr(err)
return err
}
@@ -912,25 +912,15 @@ type s3bucketStats struct {
HeadOps uint64
DelOps uint64
ListOps uint64
-
- ErrorCodes map[string]uint64 `json:",omitempty"`
-
- lock sync.Mutex
}
-func (s *s3bucketStats) tickErr(err error) {
+func (s *s3bucketStats) TickErr(err error) {
if err == nil {
return
}
- s.TickErr(err)
- errStr := fmt.Sprintf("%T", err)
+ errType := fmt.Sprintf("%T", err)
if err, ok := err.(*s3.Error); ok {
- errStr = errStr + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
- }
- s.lock.Lock()
- if s.ErrorCodes == nil {
- s.ErrorCodes = make(map[string]uint64)
+ errType = errType + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
}
- s.ErrorCodes[errStr]++
- s.lock.Unlock()
+ s.statsTicker.TickErr(err, errType)
}
diff --git a/services/keepstore/stats.go b/services/keepstore/stats.go
index 02d260c..e02eb3a 100644
--- a/services/keepstore/stats.go
+++ b/services/keepstore/stats.go
@@ -1,6 +1,7 @@
package main
import (
+ "sync"
"sync/atomic"
)
@@ -8,6 +9,9 @@ type statsTicker struct {
Errors uint64
InBytes uint64
OutBytes uint64
+
+ ErrorCodes map[string]uint64 `json:",omitempty"`
+ lock sync.Mutex
}
// Tick increments each of the given counters by 1 using
@@ -18,11 +22,18 @@ func (s *statsTicker) Tick(counters ...*uint64) {
}
}
-func (s *statsTicker) TickErr(err error) {
+func (s *statsTicker) TickErr(err error, errType string) {
if err == nil {
return
}
s.Tick(&s.Errors)
+
+ s.lock.Lock()
+ if s.ErrorCodes == nil {
+ s.ErrorCodes = make(map[string]uint64)
+ }
+ s.ErrorCodes[errType]++
+ s.lock.Unlock()
}
func (s *statsTicker) TickInBytes(n uint64) {
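
The net effect of this commit is that each backend derives a counter key from the error's dynamic type, appends the HTTP status and service code when the error is the backend's own typed error (storage.AzureStorageServiceError here, *s3.Error for S3), and then hands the error plus key to the shared statsTicker.TickErr. A sketch of just that keying step, using a hypothetical serviceError type in place of the real SDK error types:

package main

import (
	"errors"
	"fmt"
)

// serviceError is a hypothetical stand-in for a typed SDK error such as
// storage.AzureStorageServiceError or *s3.Error.
type serviceError struct {
	StatusCode int
	Code       string
}

func (e *serviceError) Error() string {
	return fmt.Sprintf("%d %s", e.StatusCode, e.Code)
}

// errKey mirrors the keying logic in azureBlobStats.TickErr: start from the
// dynamic type, then add status details for the backend's known error type.
func errKey(err error) string {
	errType := fmt.Sprintf("%T", err)
	if se, ok := err.(*serviceError); ok {
		errType = errType + fmt.Sprintf(" %d (%s)", se.StatusCode, se.Code)
	}
	return errType
}

func main() {
	fmt.Println(errKey(errors.New("plain")))                               // *errors.errorString
	fmt.Println(errKey(&serviceError{StatusCode: 404, Code: "Not Found"})) // *main.serviceError 404 (Not Found)
}

This is what the new test assertion above matches against: the 404 from the stub server shows up in ErrorCodes under a key like "storage.AzureStorageServiceError 404 (404 Not Found)".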
commit 3121f0dfb9262ccd50d0637c9f7cedf9191f69bf
Author: Tom Clegg <tom at curoverse.com>
Date: Mon Jan 2 13:57:31 2017 -0500
10682: Add backend stats for Azure volumes.
diff --git a/services/keepstore/azure_blob_volume.go b/services/keepstore/azure_blob_volume.go
index 7534489..3837598 100644
--- a/services/keepstore/azure_blob_volume.go
+++ b/services/keepstore/azure_blob_volume.go
@@ -103,7 +103,27 @@ type AzureBlobVolume struct {
RequestTimeout arvados.Duration
azClient storage.Client
- bsClient storage.BlobStorageClient
+ bsClient *azureBlobClient
+}
+
+// azureBlobClient wraps storage.BlobStorageClient in order to count
+// I/O and API usage stats.
+type azureBlobClient struct {
+ client *storage.BlobStorageClient
+ stats azureBlobStats
+}
+
+type azureBlobStats struct {
+ statsTicker
+ Ops uint64
+ GetOps uint64
+ GetRangeOps uint64
+ CreateOps uint64
+ SetMetadataOps uint64
+ DelOps uint64
+ ListOps uint64
+
+ lock sync.Mutex
}
// Examples implements VolumeWithExamples.
@@ -147,7 +167,10 @@ func (v *AzureBlobVolume) Start() error {
v.azClient.HTTPClient = &http.Client{
Timeout: time.Duration(v.RequestTimeout),
}
- v.bsClient = v.azClient.GetBlobService()
+ bs := v.azClient.GetBlobService()
+ v.bsClient = &azureBlobClient{
+ client: &bs,
+ }
ok, err := v.bsClient.ContainerExists(v.ContainerName)
if err != nil {
@@ -623,3 +646,72 @@ func (v *AzureBlobVolume) EmptyTrash() {
log.Printf("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
+
+// InternalStats returns bucket I/O and API call counters.
+func (v *AzureBlobVolume) InternalStats() interface{} {
+ return &v.bsClient.stats
+}
+
+func (c *azureBlobClient) ContainerExists(cname string) (bool, error) {
+ c.stats.Tick(&c.stats.Ops)
+ ok, err := c.client.ContainerExists(cname)
+ c.stats.TickErr(err)
+ return ok, err
+}
+
+func (c *azureBlobClient) GetBlobMetadata(cname, bname string) (map[string]string, error) {
+ c.stats.Tick(&c.stats.Ops)
+ m, err := c.client.GetBlobMetadata(cname, bname)
+ c.stats.TickErr(err)
+ return m, err
+}
+
+func (c *azureBlobClient) GetBlobProperties(cname, bname string) (*storage.BlobProperties, error) {
+ c.stats.Tick(&c.stats.Ops)
+ p, err := c.client.GetBlobProperties(cname, bname)
+ c.stats.TickErr(err)
+ return p, err
+}
+
+func (c *azureBlobClient) GetBlob(cname, bname string) (io.ReadCloser, error) {
+ c.stats.Tick(&c.stats.Ops, &c.stats.GetOps)
+ rdr, err := c.client.GetBlob(cname, bname)
+ c.stats.TickErr(err)
+ return NewCountingReader(rdr, c.stats.TickInBytes), err
+}
+
+func (c *azureBlobClient) GetBlobRange(cname, bname, byterange string, hdrs map[string]string) (io.ReadCloser, error) {
+ c.stats.Tick(&c.stats.Ops, &c.stats.GetRangeOps)
+ rdr, err := c.client.GetBlobRange(cname, bname, byterange, hdrs)
+ c.stats.TickErr(err)
+ return NewCountingReader(rdr, c.stats.TickInBytes), err
+}
+
+func (c *azureBlobClient) CreateBlockBlobFromReader(cname, bname string, size uint64, rdr io.Reader, hdrs map[string]string) error {
+ c.stats.Tick(&c.stats.Ops, &c.stats.CreateOps)
+ rdr = NewCountingReader(rdr, c.stats.TickOutBytes)
+ err := c.client.CreateBlockBlobFromReader(cname, bname, size, rdr, hdrs)
+ c.stats.TickErr(err)
+ return err
+}
+
+func (c *azureBlobClient) SetBlobMetadata(cname, bname string, m, hdrs map[string]string) error {
+ c.stats.Tick(&c.stats.Ops, &c.stats.SetMetadataOps)
+ err := c.client.SetBlobMetadata(cname, bname, m, hdrs)
+ c.stats.TickErr(err)
+ return err
+}
+
+func (c *azureBlobClient) ListBlobs(cname string, params storage.ListBlobsParameters) (storage.BlobListResponse, error) {
+ c.stats.Tick(&c.stats.Ops, &c.stats.ListOps)
+ resp, err := c.client.ListBlobs(cname, params)
+ c.stats.TickErr(err)
+ return resp, err
+}
+
+func (c *azureBlobClient) DeleteBlob(cname, bname string, hdrs map[string]string) error {
+ c.stats.Tick(&c.stats.Ops, &c.stats.DelOps)
+ err := c.client.DeleteBlob(cname, bname, hdrs)
+ c.stats.TickErr(err)
+ return err
+}
diff --git a/services/keepstore/azure_blob_volume_test.go b/services/keepstore/azure_blob_volume_test.go
index f4498f4..7bccafc 100644
--- a/services/keepstore/azure_blob_volume_test.go
+++ b/services/keepstore/azure_blob_volume_test.go
@@ -5,6 +5,7 @@ import (
"context"
"crypto/md5"
"encoding/base64"
+ "encoding/json"
"encoding/xml"
"flag"
"fmt"
@@ -23,6 +24,7 @@ import (
log "github.com/Sirupsen/logrus"
"github.com/curoverse/azure-sdk-for-go/storage"
+ check "gopkg.in/check.v1"
)
const (
@@ -369,12 +371,13 @@ func NewTestableAzureBlobVolume(t TB, readonly bool, replication int) *TestableA
}
}
+ bs := azClient.GetBlobService()
v := &AzureBlobVolume{
ContainerName: container,
ReadOnly: readonly,
AzureReplication: replication,
azClient: azClient,
- bsClient: azClient.GetBlobService(),
+ bsClient: &azureBlobClient{client: &bs},
}
return &TestableAzureBlobVolume{
@@ -385,6 +388,29 @@ func NewTestableAzureBlobVolume(t TB, readonly bool, replication int) *TestableA
}
}
+var _ = check.Suite(&StubbedAzureBlobSuite{})
+
+type StubbedAzureBlobSuite struct {
+ volume *TestableAzureBlobVolume
+ origHTTPTransport http.RoundTripper
+}
+
+func (s *StubbedAzureBlobSuite) SetUpTest(c *check.C) {
+ s.origHTTPTransport = http.DefaultTransport
+ http.DefaultTransport = &http.Transport{
+ Dial: (&azStubDialer{}).Dial,
+ }
+ azureWriteRaceInterval = time.Millisecond
+ azureWriteRacePollTime = time.Nanosecond
+
+ s.volume = NewTestableAzureBlobVolume(c, false, 3)
+}
+
+func (s *StubbedAzureBlobSuite) TearDownTest(c *check.C) {
+ s.volume.Teardown()
+ http.DefaultTransport = s.origHTTPTransport
+}
+
func TestAzureBlobVolumeWithGeneric(t *testing.T) {
defer func(t http.RoundTripper) {
http.DefaultTransport = t
@@ -643,6 +669,35 @@ func testAzureBlobVolumeContextCancel(t *testing.T, testFunc func(context.Contex
}()
}
+func (s *StubbedAzureBlobSuite) TestStats(c *check.C) {
+ stats := func() string {
+ buf, err := json.Marshal(s.volume.InternalStats())
+ c.Check(err, check.IsNil)
+ return string(buf)
+ }
+
+ c.Check(stats(), check.Matches, `.*"Ops":0,.*`)
+ c.Check(stats(), check.Matches, `.*"Errors":0,.*`)
+
+ loc := "acbd18db4cc2f85cedef654fccc4a4d8"
+ _, err := s.volume.Get(context.Background(), loc, make([]byte, 3))
+ c.Check(err, check.NotNil)
+ c.Check(stats(), check.Matches, `.*"Ops":[^0],.*`)
+ c.Check(stats(), check.Matches, `.*"Errors":[^0],.*`)
+ c.Check(stats(), check.Matches, `.*"InBytes":0,.*`)
+
+ err = s.volume.Put(context.Background(), loc, []byte("foo"))
+ c.Check(err, check.IsNil)
+ c.Check(stats(), check.Matches, `.*"OutBytes":3,.*`)
+ c.Check(stats(), check.Matches, `.*"CreateOps":1,.*`)
+
+ _, err = s.volume.Get(context.Background(), loc, make([]byte, 3))
+ c.Check(err, check.IsNil)
+ _, err = s.volume.Get(context.Background(), loc, make([]byte, 3))
+ c.Check(err, check.IsNil)
+ c.Check(stats(), check.Matches, `.*"InBytes":6,.*`)
+}
+
func (v *TestableAzureBlobVolume) PutRaw(locator string, data []byte) {
v.azHandler.PutRaw(v.ContainerName, locator, data)
}
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index ca5b1a2..c1d2105 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -14,7 +14,6 @@ import (
"regexp"
"strings"
"sync"
- "sync/atomic"
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
@@ -456,10 +455,10 @@ func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
Prefix: "recent/" + prefix,
PageSize: v.IndexPageSize,
}
- v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
- v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
for data, recent := dataL.First(), recentL.First(); data != nil; data = dataL.Next() {
- v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
if data.Key >= "g" {
// Conveniently, "recent/*" and "trash/*" are
// lexically greater than all hex-encoded data
@@ -481,12 +480,12 @@ func (v *S3Volume) IndexTo(prefix string, writer io.Writer) error {
for recent != nil {
if cmp := strings.Compare(recent.Key[7:], data.Key); cmp < 0 {
recent = recentL.Next()
- v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
continue
} else if cmp == 0 {
stamp = recent
recent = recentL.Next()
- v.bucket.stats.tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
+ v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.ListOps)
break
} else {
// recent/X marker is missing: we'll
@@ -872,74 +871,58 @@ type s3bucket struct {
func (b *s3bucket) GetReader(path string) (io.ReadCloser, error) {
rdr, err := b.Bucket.GetReader(path)
- b.stats.tick(&b.stats.Ops, &b.stats.GetOps)
+ b.stats.Tick(&b.stats.Ops, &b.stats.GetOps)
b.stats.tickErr(err)
- return NewCountingReader(rdr, b.stats.tickInBytes), err
+ return NewCountingReader(rdr, b.stats.TickInBytes), err
}
func (b *s3bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
resp, err := b.Bucket.Head(path, headers)
- b.stats.tick(&b.stats.Ops, &b.stats.HeadOps)
+ b.stats.Tick(&b.stats.Ops, &b.stats.HeadOps)
b.stats.tickErr(err)
return resp, err
}
func (b *s3bucket) PutReader(path string, r io.Reader, length int64, contType string, perm s3.ACL, options s3.Options) error {
- err := b.Bucket.PutReader(path, NewCountingReader(r, b.stats.tickOutBytes), length, contType, perm, options)
- b.stats.tick(&b.stats.Ops, &b.stats.PutOps)
+ err := b.Bucket.PutReader(path, NewCountingReader(r, b.stats.TickOutBytes), length, contType, perm, options)
+ b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
b.stats.tickErr(err)
return err
}
func (b *s3bucket) Put(path string, data []byte, contType string, perm s3.ACL, options s3.Options) error {
- err := b.Bucket.PutReader(path, NewCountingReader(bytes.NewBuffer(data), b.stats.tickOutBytes), int64(len(data)), contType, perm, options)
- b.stats.tick(&b.stats.Ops, &b.stats.PutOps)
+ err := b.Bucket.PutReader(path, NewCountingReader(bytes.NewBuffer(data), b.stats.TickOutBytes), int64(len(data)), contType, perm, options)
+ b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
b.stats.tickErr(err)
return err
}
func (b *s3bucket) Del(path string) error {
err := b.Bucket.Del(path)
- b.stats.tick(&b.stats.Ops, &b.stats.DelOps)
+ b.stats.Tick(&b.stats.Ops, &b.stats.DelOps)
b.stats.tickErr(err)
return err
}
type s3bucketStats struct {
- Errors uint64
- Ops uint64
- GetOps uint64
- PutOps uint64
- HeadOps uint64
- DelOps uint64
- ListOps uint64
- InBytes uint64
- OutBytes uint64
+ statsTicker
+ Ops uint64
+ GetOps uint64
+ PutOps uint64
+ HeadOps uint64
+ DelOps uint64
+ ListOps uint64
ErrorCodes map[string]uint64 `json:",omitempty"`
lock sync.Mutex
}
-func (s *s3bucketStats) tickInBytes(n uint64) {
- atomic.AddUint64(&s.InBytes, n)
-}
-
-func (s *s3bucketStats) tickOutBytes(n uint64) {
- atomic.AddUint64(&s.OutBytes, n)
-}
-
-func (s *s3bucketStats) tick(counters ...*uint64) {
- for _, counter := range counters {
- atomic.AddUint64(counter, 1)
- }
-}
-
func (s *s3bucketStats) tickErr(err error) {
if err == nil {
return
}
- atomic.AddUint64(&s.Errors, 1)
+ s.TickErr(err)
errStr := fmt.Sprintf("%T", err)
if err, ok := err.(*s3.Error); ok {
errStr = errStr + fmt.Sprintf(" %d %s", err.StatusCode, err.Code)
diff --git a/services/keepstore/stats.go b/services/keepstore/stats.go
new file mode 100644
index 0000000..02d260c
--- /dev/null
+++ b/services/keepstore/stats.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "sync/atomic"
+)
+
+type statsTicker struct {
+ Errors uint64
+ InBytes uint64
+ OutBytes uint64
+}
+
+// Tick increments each of the given counters by 1 using
+// atomic.AddUint64.
+func (s *statsTicker) Tick(counters ...*uint64) {
+ for _, counter := range counters {
+ atomic.AddUint64(counter, 1)
+ }
+}
+
+func (s *statsTicker) TickErr(err error) {
+ if err == nil {
+ return
+ }
+ s.Tick(&s.Errors)
+}
+
+func (s *statsTicker) TickInBytes(n uint64) {
+ atomic.AddUint64(&s.InBytes, n)
+}
+
+func (s *statsTicker) TickOutBytes(n uint64) {
+ atomic.AddUint64(&s.OutBytes, n)
+}
-----------------------------------------------------------------------
hooks/post-receive
--