[ARVADOS] updated: 1.3.0-2814-g95babd9e2
Git user
git at public.arvados.org
Wed Jul 29 15:58:31 UTC 2020
Summary of changes:
...configure-s3-object-storage.html.textile.liquid | 20 +--
lib/config/config.default.yml | 2 +-
lib/config/generated_config.go | 2 +-
sdk/go/arvados/config.go | 4 +-
services/keepstore/s3_volume.go | 2 +-
services/keepstore/s3aws_volume.go | 147 +++++++++------------
services/keepstore/s3aws_volume_test.go | 18 +--
7 files changed, 77 insertions(+), 118 deletions(-)
via 95babd9e21eb871eed9535fad3d2af8ecdeb471d (commit)
from 8f3b2dedef2677654197e9838939d9abe7cc3791 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email, so we list those
revisions in full below.
commit 95babd9e21eb871eed9535fad3d2af8ecdeb471d
Author: Ward Vandewege <ward at curii.com>
Date: Wed Jul 29 11:58:16 2020 -0400
10477: implement review comments.
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <ward at curii.com>
diff --git a/doc/install/configure-s3-object-storage.html.textile.liquid b/doc/install/configure-s3-object-storage.html.textile.liquid
index 40cbbb533..76a2f3ab5 100644
--- a/doc/install/configure-s3-object-storage.html.textile.liquid
+++ b/doc/install/configure-s3-object-storage.html.textile.liquid
@@ -64,8 +64,8 @@ Volumes are configured in the @Volumes@ section of the cluster configuration fil
# might be needed for other S3-compatible services.
V2Signature: false
- # Use the AWS S3 Go driver instead of the goamz driver.
- AlternateDriver: false
+ # Use the AWS S3 v2 Go driver instead of the goamz driver.
+ UseAWSS3v2Driver: false
# Requested page size for "list bucket contents" requests.
IndexPageSize: 1000
@@ -98,20 +98,8 @@ Volumes are configured in the @Volumes@ section of the cluster configuration fil
StorageClasses: null
</code></pre></notextile>
-Two S3 drivers are available. Historically, Arvados has used the @goamz@ driver to talk to S3-compatible services. More recently, support for the @aws-sdk-go-v2@ driver was added. This driver can be activated by setting the @AlternateDriver@ flag to @true@.
+Two S3 drivers are available. Historically, Arvados has used the @goamz@ driver to talk to S3-compatible services. More recently, support for the @aws-sdk-go-v2@ driver was added. This driver can be activated by setting the @UseAWSS3v2Driver@ flag to @true@.
The @aws-sdk-go-v2@ driver does not support the old S3 v2 signing algorithm. This does not affect interaction with AWS S3 itself, but it might be an issue when Keep is backed by a very old version of a third-party S3-compatible service.
-The @aws-sdk-go-v2@ driver has faster _single thread_ read and write performance than the @goamz@ driver. Here are some benchmark numbers against AWS S3, as measured in July 2020. They were generated with the @keep-exercise@ tool in an Arvados installation with one dedicated Keepstore node (c5n.2xlarge) and one dedicated node for running @keep-exercise@ (c5n.2xlarge). The Keepstore node was backed by one S3 bucket, in a VPC with an S3 endpoint installed. Versioning, Server Access logging, Static website hosting, Object-level logging and Default encryption were disabled. Object lock, Transfer acceleration and Requester pays were also disabled. There were no Active notifications. Each test consisted of 4 one-minute runs, which were averaged into one number. The tests were repeated 3 times, and of those 3 runs, the highest average speed was selected and included in the table below.
-
-table(table table-bordered table-condensed).
-||_. goamz |_. aws-sdk-go-v2 |_. command line|
-|single thread read performance (average)|32.53 MiB/s|79.48 MiB/s|keep-exercise -repeat 4 -run-time 60s -vary-request -use-index -rthreads 1 -wthreads 1|
-|single thread write performance (average)|39.75 MiB/s|41.05 MiB/s|keep-exercise -repeat 4 -run-time 60s -vary-request -use-index -rthreads 1 -wthreads 1|
-
-Because both S3 and Keep are optimized for _aggregate_ throughput, the single-thread performance is not as important as it may seem at first glance. When using 20 concurrent read or write threads, the numbers from both drivers are more closely aligned:
-
-table(table table-bordered table-condensed).
-||_. goamz |_. aws-sdk-go-v2 |_. command line|
-|20 thread read performance (average)|585.60 MiB/s|898.93 MiB/s|keep-exercise -repeat 4 -run-time 60s -vary-request -use-index -rthreads 20 -wthreads 0|
-|20 thread write performance (average)|610.40 MiB/s|688.25 MiB/s|keep-exercise -repeat 4 -run-time 60s -vary-request -use-index -rthreads 0 -wthreads 20|
+The @aws-sdk-go-v2@ driver can improve read performance by 50-100% over the @goamz@ driver, but it has not had as much production use. See the "wiki":https://dev.arvados.org/projects/arvados/wiki/Keep_real_world_performance_numbers for details.
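
For reference, enabling the new driver amounts to one flag in the volume's DriverParameters. A minimal sketch of such a stanza (the cluster ID, volume UUID, and bucket name are placeholders; the flag name is the one introduced by this commit):

    Clusters:
      zzzzz:
        Volumes:
          zzzzz-nyw5e-000000000000000:
            Driver: S3
            DriverParameters:
              Bucket: example-bucket
              # Use the AWS S3 v2 Go driver instead of the goamz driver.
              UseAWSS3v2Driver: true
              # The v2 driver rejects the old S3 v2 signing algorithm.
              V2Signature: false
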
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index d2ccefe8b..01d399943 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -1073,7 +1073,7 @@ Clusters:
ReadTimeout: 10m
RaceWindow: 24h
# Use aws-s3-go (v2) instead of goamz
- AlternateDriver: false
+ UseAWSS3v2Driver: false
# For S3 driver, potentially unsafe tuning parameter,
# intentionally excluded from main documentation.
diff --git a/lib/config/generated_config.go b/lib/config/generated_config.go
index d6d198429..508652a8a 100644
--- a/lib/config/generated_config.go
+++ b/lib/config/generated_config.go
@@ -1079,7 +1079,7 @@ Clusters:
ReadTimeout: 10m
RaceWindow: 24h
# Use aws-s3-go (v2) instead of goamz
- AlternateDriver: false
+ UseAWSS3v2Driver: false
# For S3 driver, potentially unsafe tuning parameter,
# intentionally excluded from main documentation.
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index 515dc7973..9cf1ed3cd 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -277,7 +277,7 @@ type S3VolumeDriverParameters struct {
Bucket string
LocationConstraint bool
V2Signature bool
- AlternateDriver bool
+ UseAWSS3v2Driver bool
IndexPageSize int
ConnectTimeout Duration
ReadTimeout Duration
@@ -553,7 +553,7 @@ func (ss *StringSet) UnmarshalJSON(data []byte) error {
return err
}
*ss = make(map[string]struct{}, len(hash))
- for t, _ := range hash {
+ for t := range hash {
(*ss)[t] = struct{}{}
}
diff --git a/services/keepstore/s3_volume.go b/services/keepstore/s3_volume.go
index 8e32e592b..235d369b5 100644
--- a/services/keepstore/s3_volume.go
+++ b/services/keepstore/s3_volume.go
@@ -37,7 +37,7 @@ func init() {
func newS3Volume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, &v)
+ err := json.Unmarshal(volume.DriverParameters, v)
if err != nil {
return nil, err
}
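
The &v to v change here (and in the constructors changed below) is subtle: v is already a *S3Volume, so &v hands encoding/json a **S3Volume. json follows the extra indirection when the document is an object, but a JSON null is decoded by zeroing the first settable pointer, which would discard the struct that was just pre-populated with cluster, volume, and metrics. Whether or not that ever bit in practice, passing the pointer itself is the idiomatic form. A minimal sketch of the difference, using a hypothetical struct rather than the keepstore types:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type vol struct{ Bucket string }

    func main() {
    	v := &vol{Bucket: "preset"}

    	// Unmarshaling null into a *vol leaves the struct alone.
    	if err := json.Unmarshal([]byte(`null`), v); err != nil {
    		panic(err)
    	}
    	fmt.Println(v.Bucket) // "preset"

    	// Unmarshaling null into a **vol zeroes the pointer itself.
    	if err := json.Unmarshal([]byte(`null`), &v); err != nil {
    		panic(err)
    	}
    	fmt.Println(v == nil) // true
    }
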
diff --git a/services/keepstore/s3aws_volume.go b/services/keepstore/s3aws_volume.go
index 9bbb3c5b1..c9fa7fce5 100644
--- a/services/keepstore/s3aws_volume.go
+++ b/services/keepstore/s3aws_volume.go
@@ -59,22 +59,28 @@ type s3AWSbucket struct {
}
// chooseS3VolumeDriver distinguishes between the old goamz driver and
-// aws-sdk-go based on the AlternateDriver feature flag
+// aws-sdk-go based on the UseAWSS3v2Driver feature flag
func chooseS3VolumeDriver(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
v := &S3Volume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, &v)
+ err := json.Unmarshal(volume.DriverParameters, v)
if err != nil {
return nil, err
}
- if v.AlternateDriver {
- logger.Debugln("Using alternate S3 driver (aws-go)")
+ if v.UseAWSS3v2Driver {
+ logger.Debugln("Using AWS S3 v2 driver")
return newS3AWSVolume(cluster, volume, logger, metrics)
} else {
- logger.Debugln("Using standard S3 driver (goamz)")
+ logger.Debugln("Using goamz S3 driver")
return newS3Volume(cluster, volume, logger, metrics)
}
}
+// Tuning constants for the aws-sdk-go-v2 s3manager transfers: 5 MiB
+// parts, with separate concurrency defaults for reads and writes.
+const (
+ PartSize = 5 * 1024 * 1024
+ ReadConcurrency = 13
+ WriteConcurrency = 5
+)
+
var s3AWSKeepBlockRegexp = regexp.MustCompile(`^[0-9a-f]{32}$`)
var s3AWSZeroTime time.Time
@@ -83,14 +89,12 @@ func (v *S3AWSVolume) isKeepBlock(s string) bool {
}
func newS3AWSVolume(cluster *arvados.Cluster, volume arvados.Volume, logger logrus.FieldLogger, metrics *volumeMetricsVecs) (Volume, error) {
- logger.Debugln("in newS3AWSVolume")
v := &S3AWSVolume{cluster: cluster, volume: volume, metrics: metrics}
- err := json.Unmarshal(volume.DriverParameters, &v)
+ err := json.Unmarshal(volume.DriverParameters, v)
if err != nil {
return nil, err
}
v.logger = logger.WithField("Volume", v.String())
- v.logger.Debugln("in newS3AWSVolume after volume set")
return v, v.check("")
}
@@ -148,6 +152,10 @@ func (v *S3AWSVolume) check(ec2metadataHostname string) error {
return errors.New("DriverParameters: RaceWindow must not be negative")
}
+ if v.V2Signature {
+ return errors.New("DriverParameters: V2Signature is not supported")
+ }
+
defaultResolver := endpoints.NewDefaultResolver()
cfg := defaults.Config()
@@ -276,7 +284,6 @@ func (v *S3AWSVolume) EmptyTrash() {
startT := time.Now()
emptyOneKey := func(trash *s3.Object) {
- v.logger.Warnf("EmptyTrash: looking for trash marker %s with last modified date %s", *trash.Key, *trash.LastModified)
loc := strings.TrimPrefix(*trash.Key, "trash/")
if !v.isKeepBlock(loc) {
return
@@ -285,7 +292,6 @@ func (v *S3AWSVolume) EmptyTrash() {
atomic.AddInt64(&blocksInTrash, 1)
trashT := *(trash.LastModified)
- v.logger.Infof("HEEEEEEE trashT key: %s, type: %T val: %s, startT is %s", *trash.Key, trashT, trashT, startT)
recent, err := v.Head("recent/" + loc)
if err != nil && os.IsNotExist(v.translateError(err)) {
v.logger.Warnf("EmptyTrash: found trash marker %q but no %q (%s); calling Untrash", trash.Key, "recent/"+loc, err)
@@ -298,9 +304,7 @@ func (v *S3AWSVolume) EmptyTrash() {
v.logger.WithError(err).Warnf("EmptyTrash: HEAD %q failed", "recent/"+loc)
return
}
- v.logger.Infof("recent.LastModified type: %T val: %s", recent.LastModified, recent.LastModified)
if trashT.Sub(*recent.LastModified) < v.cluster.Collections.BlobSigningTTL.Duration() {
- v.logger.Infof("HERE! recent.lastmodified is smaller than blobsigningttl")
if age := startT.Sub(*recent.LastModified); age >= v.cluster.Collections.BlobSigningTTL.Duration()-time.Duration(v.RaceWindow) {
// recent/loc is too old to protect
// loc from being Trashed again during
@@ -326,7 +330,6 @@ func (v *S3AWSVolume) EmptyTrash() {
}
}
if startT.Sub(trashT) < v.cluster.Collections.BlobTrashLifetime.Duration() {
- v.logger.Infof("HERE! trashT for %s is smaller than blobtrashlifetime: %s < %s", *trash.Key, startT.Sub(trashT), v.cluster.Collections.BlobTrashLifetime.Duration())
return
}
err = v.bucket.Del(*trash.Key)
@@ -337,7 +340,6 @@ func (v *S3AWSVolume) EmptyTrash() {
atomic.AddInt64(&bytesDeleted, *trash.Size)
atomic.AddInt64(&blocksDeleted, 1)
- v.logger.Infof("HERE! trash.Key %s should have been deleted", *trash.Key)
_, err = v.Head(loc)
if err == nil {
v.logger.Warnf("EmptyTrash: HEAD %q succeeded immediately after deleting %q", loc, loc)
@@ -351,7 +353,6 @@ func (v *S3AWSVolume) EmptyTrash() {
if err != nil {
v.logger.WithError(err).Warnf("EmptyTrash: error deleting %q", "recent/"+loc)
}
- v.logger.Infof("HERE! recent/%s should have been deleted", loc)
}
var wg sync.WaitGroup
@@ -382,7 +383,7 @@ func (v *S3AWSVolume) EmptyTrash() {
if err := trashL.Error(); err != nil {
v.logger.WithError(err).Error("EmptyTrash: lister failed")
}
- v.logger.Infof("EmptyTrash stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
+ v.logger.Infof("EmptyTrash: stats for %v: Deleted %v bytes in %v blocks. Remaining in trash: %v bytes in %v blocks.", v.String(), bytesDeleted, blocksDeleted, bytesInTrash-bytesDeleted, blocksInTrash-blocksDeleted)
}
// fixRace(X) is called when "recent/X" exists but "X" doesn't
@@ -454,8 +455,8 @@ func (v *S3AWSVolume) readWorker(ctx context.Context, loc string) (rdr io.ReadCl
awsBuf := aws.NewWriteAtBuffer(buf)
downloader := s3manager.NewDownloaderWithClient(v.bucket.svc, func(u *s3manager.Downloader) {
- u.PartSize = 5 * 1024 * 1024
- u.Concurrency = 13
+ u.PartSize = PartSize
+ u.Concurrency = ReadConcurrency
})
v.logger.Debugf("Partsize: %d; Concurrency: %d\n", downloader.PartSize, downloader.Concurrency)
@@ -517,70 +518,36 @@ func (v *S3AWSVolume) ReadBlock(ctx context.Context, loc string, w io.Writer) er
return err
}
-func (b *s3AWSbucket) PutReader(path string, r io.Reader, length int64, contType string, contentMD5 string, contentSHA256 string) error {
- if length == 0 {
- // aws-sdk-go will only send Content-Length: 0 when reader
- // is nil due to net.http.Request.ContentLength
- // behavior. Otherwise, Content-Length header is
- // omitted which will cause some S3 services
- // (including AWS and Ceph RadosGW) to fail to create
- // empty objects.
- r = bytes.NewReader([]byte{})
- } else {
- r = NewCountingReader(r, b.stats.TickOutBytes)
+func (v *S3AWSVolume) writeObject(ctx context.Context, name string, r io.Reader) error {
+ if r == nil {
+ // r == nil leads to a memory violation in func readFillBuf in
+ // aws-sdk-go-v2 at v0.23.0/service/s3/s3manager/upload.go
+ r = bytes.NewReader(nil)
}
- uploader := s3manager.NewUploaderWithClient(b.svc)
- _, err := uploader.Upload(&s3manager.UploadInput{
- Bucket: aws.String(b.bucket),
- Key: aws.String(path),
- Body: r,
- }, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
- r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
- }))
- b.stats.TickOps("put")
- b.stats.Tick(&b.stats.Ops, &b.stats.PutOps)
- b.stats.TickErr(err)
- return err
-}
-
-// Put writes a block.
-func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
- return putWithPipe(ctx, loc, block, v)
-}
-
-// WriteBlock implements BlockWriter.
-func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
- if v.volume.ReadOnly {
- return MethodDisabledError
- }
-
- r := NewCountingReader(rdr, v.bucket.stats.TickOutBytes)
uploadInput := s3manager.UploadInput{
Bucket: aws.String(v.bucket.bucket),
- Key: aws.String(loc),
+ Key: aws.String(name),
Body: r,
}
- //var contentMD5, contentSHA256 string
- var contentMD5 string
- md5, err := hex.DecodeString(loc)
- if err != nil {
- return err
- }
- contentMD5 = base64.StdEncoding.EncodeToString(md5)
- // See if this is the empty block
- if contentMD5 != "d41d8cd98f00b204e9800998ecf8427e" {
+ if len(name) == 32 {
+ var contentMD5 string
+ md5, err := hex.DecodeString(name)
+ if err != nil {
+ return err
+ }
+ contentMD5 = base64.StdEncoding.EncodeToString(md5)
uploadInput.ContentMD5 = &contentMD5
}
- // Some experimentation indicated that using concurrency 5 yields the best
+ // Experimentation indicated that using concurrency 5 yields the best
// throughput, better than higher concurrency (10 or 13) by ~5%.
// Defining u.BufferProvider = s3manager.NewBufferedReadSeekerWriteToPool(64 * 1024 * 1024)
// is detrimental to throughput (minus ~15%).
uploader := s3manager.NewUploaderWithClient(v.bucket.svc, func(u *s3manager.Uploader) {
- u.PartSize = 5 * 1024 * 1024
- u.Concurrency = 5
+ u.PartSize = PartSize
+ u.Concurrency = WriteConcurrency
})
// Unlike the goamz S3 driver, we don't need to precompute ContentSHA256:
@@ -589,35 +556,39 @@ func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader)
// makeSha256Reader in aws/signer/v4/v4.go. In fact, we explicitly disable
// calculating the Sha-256 because we don't need it; we already use md5sum
// hashes that match the name of the block.
- _, err = uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
+ _, err := uploader.UploadWithContext(ctx, &uploadInput, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
}))
v.bucket.stats.TickOps("put")
v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
v.bucket.stats.TickErr(err)
+
+ return err
+}
+
+// Put writes a block.
+func (v *S3AWSVolume) Put(ctx context.Context, loc string, block []byte) error {
+ return putWithPipe(ctx, loc, block, v)
+}
+
+// WriteBlock implements BlockWriter.
+func (v *S3AWSVolume) WriteBlock(ctx context.Context, loc string, rdr io.Reader) error {
+ if v.volume.ReadOnly {
+ return MethodDisabledError
+ }
+
+ r := NewCountingReader(rdr, v.bucket.stats.TickOutBytes)
+ err := v.writeObject(ctx, loc, r)
if err != nil {
return err
}
-
- empty := bytes.NewReader([]byte{})
- _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
- Bucket: aws.String(v.bucket.bucket),
- Key: aws.String("recent/" + loc),
- Body: empty,
- }, s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
- r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
- }))
- v.bucket.stats.TickOps("put")
- v.bucket.stats.Tick(&v.bucket.stats.Ops, &v.bucket.stats.PutOps)
- v.bucket.stats.TickErr(err)
-
- return err
+ return v.writeObject(ctx, "recent/"+loc, nil)
}
type s3awsLister struct {
Logger logrus.FieldLogger
- Bucket *s3AWSbucket //*s3.Bucket
+ Bucket *s3AWSbucket
Prefix string
PageSize int
Stats *s3awsbucketStats
@@ -772,12 +743,12 @@ func (v *S3AWSVolume) Mtime(loc string) (time.Time, error) {
err = v.translateError(err)
if os.IsNotExist(err) {
// The data object X exists, but recent/X is missing.
- err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
+ err = v.writeObject(context.Background(), "recent/"+loc, nil)
if err != nil {
v.logger.WithError(err).Errorf("error creating %q", "recent/"+loc)
return s3AWSZeroTime, v.translateError(err)
}
- v.logger.Infof("created %q to migrate existing block to new storage scheme", "recent/"+loc)
+ v.logger.Infof("Mtime: created %q to migrate existing block to new storage scheme", "recent/"+loc)
resp, err = v.Head("recent/" + loc)
if err != nil {
v.logger.WithError(err).Errorf("HEAD failed after creating %q", "recent/"+loc)
@@ -819,7 +790,7 @@ func (v *S3AWSVolume) Touch(loc string) error {
} else if err != nil {
return err
}
- err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
+ err = v.writeObject(context.Background(), "recent/"+loc, nil)
return v.translateError(err)
}
@@ -898,7 +869,7 @@ func (v *S3AWSVolume) Untrash(loc string) error {
if err != nil {
return err
}
- err = v.bucket.PutReader("recent/"+loc, nil, 0, "application/octet-stream", "", "")
+ err = v.writeObject(context.Background(), "recent/"+loc, nil)
return v.translateError(err)
}
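
Taken out of diff context, the new upload path is easier to read as one unit. A condensed, self-contained sketch against aws-sdk-go-v2 v0.23.x (the client wiring is illustrative; the s3manager calls, tuning values, and UNSIGNED-PAYLOAD header are the ones used above):

    package sketch

    import (
    	"bytes"
    	"context"
    	"encoding/base64"
    	"encoding/hex"
    	"io"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    	"github.com/aws/aws-sdk-go-v2/service/s3/s3manager"
    )

    const (
    	partSize         = 5 * 1024 * 1024
    	writeConcurrency = 5
    )

    // writeObject uploads r (or an empty object when r is nil) to key name.
    // When name is a 32-hex Keep locator, its md5 doubles as ContentMD5.
    func writeObject(ctx context.Context, svc *s3.Client, bucket, name string, r io.Reader) error {
    	if r == nil {
    		// A nil reader crashes readFillBuf in s3manager v0.23.0;
    		// substitute an empty reader to write a zero-byte object.
    		r = bytes.NewReader(nil)
    	}
    	input := s3manager.UploadInput{
    		Bucket: aws.String(bucket),
    		Key:    aws.String(name),
    		Body:   r,
    	}
    	if len(name) == 32 {
    		md5bytes, err := hex.DecodeString(name)
    		if err != nil {
    			return err
    		}
    		contentMD5 := base64.StdEncoding.EncodeToString(md5bytes)
    		input.ContentMD5 = &contentMD5
    	}
    	uploader := s3manager.NewUploaderWithClient(svc, func(u *s3manager.Uploader) {
    		u.PartSize = partSize
    		u.Concurrency = writeConcurrency
    	})
    	// Skip SHA-256 payload signing; the md5-derived key already
    	// verifies content integrity.
    	_, err := uploader.UploadWithContext(ctx, &input,
    		s3manager.WithUploaderRequestOptions(func(r *aws.Request) {
    			r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
    		}))
    	return err
    }
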
diff --git a/services/keepstore/s3aws_volume_test.go b/services/keepstore/s3aws_volume_test.go
index 46fe07d16..57d81dbe0 100644
--- a/services/keepstore/s3aws_volume_test.go
+++ b/services/keepstore/s3aws_volume_test.go
@@ -167,7 +167,6 @@ func (s *StubbedS3AWSSuite) TestIAMRoleCredentials(c *check.C) {
}
err := v.check(s.metadata.URL + "/latest")
creds, err := v.bucket.svc.Client.Config.Credentials.Retrieve(context.Background())
- fmt.Printf("%+v, %s\n", creds, err)
c.Check(creds.AccessKeyID, check.Equals, "ASIAIOSFODNN7EXAMPLE")
c.Check(creds.SecretAccessKey, check.Equals, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY")
@@ -321,9 +320,8 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
return
}
v.serverClock.now = &t
- fmt.Printf("USING TIMESTAMP %s to write key %s", t, key)
uploader := s3manager.NewUploaderWithClient(v.bucket.svc)
- resp, err := uploader.UploadWithContext(context.Background(), &s3manager.UploadInput{
+ _, err := uploader.UploadWithContext(context.Background(), &s3manager.UploadInput{
Bucket: aws.String(v.bucket.bucket),
Key: aws.String(key),
Body: bytes.NewReader(data),
@@ -331,10 +329,11 @@ func (s *StubbedS3AWSSuite) TestBackendStates(c *check.C) {
if err != nil {
panic(err)
}
- fmt.Println(resp)
v.serverClock.now = nil
- resp2, err := v.Head(key)
- fmt.Printf("KEY: %s\n%s", key, resp2)
+ _, err = v.Head(key)
+ if err != nil {
+ panic(err)
+ }
}
t0 := time.Now()
@@ -544,10 +543,11 @@ func (s *StubbedS3AWSSuite) newTestableVolume(c *check.C, cluster *arvados.Clust
// fake s3
backend := s3mem.New(s3mem.WithTimeSource(clock))
- logger := new(LogrusLog)
+ // To enable GoFakeS3 debug logging, pass logger to gofakes3.WithLogger()
+ /* logger := new(LogrusLog)
ctxLogger := ctxlog.FromContext(context.Background())
- logger.log = &ctxLogger
- faker := gofakes3.New(backend, gofakes3.WithTimeSource(clock), gofakes3.WithLogger(logger), gofakes3.WithTimeSkewLimit(0))
+ logger.log = &ctxLogger */
+ faker := gofakes3.New(backend, gofakes3.WithTimeSource(clock), gofakes3.WithLogger(nil), gofakes3.WithTimeSkewLimit(0))
srv := httptest.NewServer(faker.Server())
endpoint := srv.URL
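
The stub S3 server in these tests comes from gofakes3 with the in-memory s3mem backend, served over httptest. Stripped of the clock injection visible above, the harness reduces to roughly:

    package main

    import (
    	"fmt"
    	"net/http/httptest"

    	"github.com/johannesboyne/gofakes3"
    	"github.com/johannesboyne/gofakes3/backend/s3mem"
    )

    func main() {
    	backend := s3mem.New() // the test also injects a fake time source here
    	faker := gofakes3.New(backend, gofakes3.WithLogger(nil))
    	srv := httptest.NewServer(faker.Server())
    	defer srv.Close()
    	fmt.Println("fake S3 endpoint:", srv.URL) // becomes the volume's Endpoint
    }
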
-----------------------------------------------------------------------
hooks/post-receive