[ARVADOS] created: 1.2.0-432-g9b3d58d6f
From: Git user <git at public.curoverse.com>
Date: Wed Nov 28 16:51:11 EST 2018

        at  9b3d58d6fa3d7c300e006af16ce8072bb68eca30 (commit)
commit 9b3d58d6fa3d7c300e006af16ce8072bb68eca30
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date: Wed Nov 28 16:41:42 2018 -0500
14538: Use concurrent writers to flush chunks while writing.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 58482142f..cd3dcf053 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -231,6 +231,7 @@ type filenode struct {
memsize int64 // bytes in memSegments
sync.RWMutex
nullnode
+ throttle *throttle
}
// caller must have lock
@@ -493,30 +494,75 @@ func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePt
// Write some data out to disk to reduce memory use. Caller must have
// write lock.
func (fn *filenode) pruneMemSegments() {
- // TODO: async (don't hold Lock() while waiting for Keep)
// TODO: share code with (*dirnode)sync()
// TODO: pack/flush small blocks too, when fragmented
+ if fn.throttle == nil {
+ // TODO: share a throttle with filesystem
+ fn.throttle = newThrottle(concurrentWriters)
+ }
for idx, seg := range fn.segments {
seg, ok := seg.(*memSegment)
- if !ok || seg.Len() < maxBlockSize {
- continue
- }
- locator, _, err := fn.FS().PutB(seg.buf)
- if err != nil {
- // TODO: stall (or return errors from)
- // subsequent writes until flushing
- // starts to succeed
+ if !ok || seg.Len() < maxBlockSize || seg.Len() == 0 || seg.flushing != nil {
continue
}
- fn.memsize -= int64(seg.Len())
- fn.segments[idx] = storedSegment{
- kc: fn.FS(),
- locator: locator,
- size: seg.Len(),
- offset: 0,
- length: seg.Len(),
+ // Setting seg.flushing guarantees seg.buf will not be
+ // modified in place: WriteAt and Truncate will
+ // allocate a new buf instead, if necessary.
+ idx, buf := idx, seg.buf
+ done := make(chan struct{})
+ seg.flushing = done
+ // If lots of background writes are already in
+ // progress, block here until one finishes, rather
+ // than pile up an unlimited number of buffered writes
+ // and network flush operations.
+ fn.throttle.Acquire()
+ go func() {
+ defer close(done)
+ locator, _, err := fn.FS().PutB(buf)
+ fn.throttle.Release()
+ fn.Lock()
+ defer fn.Unlock()
+ if curbuf := seg.buf[:1]; &curbuf[0] != &buf[0] {
+ // A new seg.buf has been allocated.
+ return
+ }
+ seg.flushing = nil
+ if err != nil {
+ // TODO: stall (or return errors from)
+ // subsequent writes until flushing
+ // starts to succeed.
+ return
+ }
+ if len(fn.segments) <= idx || fn.segments[idx] != seg || len(seg.buf) != len(buf) {
+ // Segment has been dropped/moved/resized.
+ return
+ }
+ fn.memsize -= int64(len(buf))
+ fn.segments[idx] = storedSegment{
+ kc: fn.FS(),
+ locator: locator,
+ size: len(buf),
+ offset: 0,
+ length: len(buf),
+ }
+ }()
+ }
+}
+
+// Block until all pending pruneMemSegments work is finished. Caller
+// must NOT have lock.
+func (fn *filenode) waitPrune() {
+ var pending []<-chan struct{}
+ fn.Lock()
+ for _, seg := range fn.segments {
+ if seg, ok := seg.(*memSegment); ok && seg.flushing != nil {
+ pending = append(pending, seg.flushing)
}
}
+ fn.Unlock()
+ for _, p := range pending {
+ <-p
+ }
}
type dirnode struct {
@@ -665,6 +711,16 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, throttle
names = append(names, name)
}
sort.Strings(names)
+
+ // Wait for children to finish any pending write operations
+ // before locking them.
+ for _, name := range names {
+ node := dn.inodes[name]
+ if fn, ok := node.(*filenode); ok {
+ fn.waitPrune()
+ }
+ }
+
var dirnames []string
var filenames []string
for _, name := range names {
@@ -969,6 +1025,11 @@ type segment interface {
type memSegment struct {
buf []byte
+ // If flushing is not nil, then a) buf is being shared by a
+ // pruneMemSegments goroutine, and must be copied on write;
+ // and b) the flushing channel will close when the goroutine
+ // finishes, whether it succeeds or not.
+ flushing <-chan struct{}
}
func (me *memSegment) Len() int {
@@ -985,28 +1046,31 @@ func (me *memSegment) Slice(off, length int) segment {
}
func (me *memSegment) Truncate(n int) {
- if n > cap(me.buf) {
+ if n > cap(me.buf) || (me.flushing != nil && n > len(me.buf)) {
newsize := 1024
for newsize < n {
newsize = newsize << 2
}
newbuf := make([]byte, n, newsize)
copy(newbuf, me.buf)
- me.buf = newbuf
+ me.buf, me.flushing = newbuf, nil
} else {
- // Zero unused part when shrinking, in case we grow
- // and start using it again later.
- for i := n; i < len(me.buf); i++ {
+ // reclaim existing capacity, and zero reclaimed part
+ oldlen := len(me.buf)
+ me.buf = me.buf[:n]
+ for i := oldlen; i < n; i++ {
me.buf[i] = 0
}
}
- me.buf = me.buf[:n]
}
func (me *memSegment) WriteAt(p []byte, off int) {
if off+len(p) > len(me.buf) {
panic("overflowed segment")
}
+ if me.flushing != nil {
+ me.buf, me.flushing = append([]byte(nil), me.buf...), nil
+ }
copy(me.buf[off:], p)
}
diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
index a6d4ab1e5..b872cc214 100644
--- a/sdk/go/arvados/fs_collection_test.go
+++ b/sdk/go/arvados/fs_collection_test.go
@@ -583,7 +583,7 @@ func (s *CollectionFSSuite) TestRandomWrites(c *check.C) {
const ngoroutines = 256
var wg sync.WaitGroup
- for n := 0; n < nfiles; n++ {
+ for n := 0; n < ngoroutines; n++ {
wg.Add(1)
go func(n int) {
defer wg.Done()
@@ -592,7 +592,7 @@ func (s *CollectionFSSuite) TestRandomWrites(c *check.C) {
f, err := s.fs.OpenFile(fmt.Sprintf("random-%d", n), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0)
c.Assert(err, check.IsNil)
defer f.Close()
- for i := 0; i < ngoroutines; i++ {
+ for i := 0; i < nfiles; i++ {
trunc := rand.Intn(65)
woff := rand.Intn(trunc + 1)
wbytes = wbytes[:rand.Intn(64-woff+1)]
@@ -1046,6 +1046,7 @@ func (s *CollectionFSSuite) TestFlushFullBlocks(c *check.C) {
c.Assert(n, check.Equals, len(data))
c.Assert(err, check.IsNil)
}
+ f.(*filehandle).inode.(*filenode).waitPrune()
currentMemExtents := func() (memExtents []int) {
for idx, e := range f.(*filehandle).inode.(*filenode).segments {
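The invariant the new code relies on is that a buffer handed to a background PutB goroutine is never modified in place: once seg.flushing is set, WriteAt and Truncate allocate a fresh buffer instead, and the goroutine checks whether the buffer was replaced before installing the storedSegment. A minimal standalone sketch of that copy-on-write handoff (simplified types and names, not the actual Arvados code):

package main

import (
    "fmt"
    "sync"
)

type segment struct {
    mu       sync.Mutex
    buf      []byte
    flushing chan struct{} // non-nil while a background flush owns buf
}

// flush hands buf to a background goroutine and returns a channel that
// closes when the flush finishes (compare filenode.waitPrune above).
func (s *segment) flush(put func([]byte)) <-chan struct{} {
    s.mu.Lock()
    buf := s.buf
    done := make(chan struct{})
    s.flushing = done
    s.mu.Unlock()
    go func() {
        defer close(done)
        put(buf) // stand-in for fn.FS().PutB(buf)
        s.mu.Lock()
        if s.flushing == done {
            s.flushing = nil
        }
        s.mu.Unlock()
    }()
    return done
}

// writeAt never modifies a buffer a flusher might still be reading.
func (s *segment) writeAt(p []byte, off int) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.flushing != nil {
        s.buf = append([]byte(nil), s.buf...) // copy on write
        s.flushing = nil
    }
    copy(s.buf[off:], p)
}

func main() {
    s := &segment{buf: []byte("hello world")}
    done := s.flush(func(b []byte) { fmt.Printf("flushed %q\n", b) })
    s.writeAt([]byte("J"), 0) // safe: writes to a private copy
    <-done
    fmt.Printf("after write: %q\n", s.buf)
}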
commit 70d02ffd2919ffb4148cdfd8cb8566db7a01345c
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date: Tue Nov 27 16:44:45 2018 -0500
14538: Move cancel-on-first-error logic to contextGroup.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>
diff --git a/sdk/go/arvados/contextgroup.go b/sdk/go/arvados/contextgroup.go
new file mode 100644
index 000000000..fa0de2458
--- /dev/null
+++ b/sdk/go/arvados/contextgroup.go
@@ -0,0 +1,95 @@
+package arvados
+
+import (
+ "context"
+ "sync"
+)
+
+// A contextGroup is a context-aware variation on sync.WaitGroup. It
+// provides a child context for the added funcs to use, so they can
+// exit early if another added func returns an error. Its Wait()
+// method returns the first error returned by any added func.
+//
+// Example:
+//
+// err := errors.New("oops")
+// cg := newContextGroup()
+// defer cg.Cancel()
+// cg.Go(func() error {
+// someFuncWithContext(cg.Context())
+// return nil
+// })
+// cg.Go(func() error {
+// return err // this cancels cg.Context()
+// })
+// return cg.Wait() // returns err after both goroutines have ended
+type contextGroup struct {
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+ err error
+ mtx sync.Mutex
+}
+
+// newContextGroup returns a new contextGroup. The caller must
+// eventually call the Cancel() method of the returned contextGroup.
+func newContextGroup(ctx context.Context) *contextGroup {
+ ctx, cancel := context.WithCancel(ctx)
+ return &contextGroup{
+ ctx: ctx,
+ cancel: cancel,
+ }
+}
+
+// Cancel cancels the context group.
+func (cg *contextGroup) Cancel() {
+ cg.cancel()
+}
+
+// Context returns a context.Context which will be canceled when all
+// funcs have succeeded or one has failed.
+func (cg *contextGroup) Context() context.Context {
+ return cg.ctx
+}
+
+// Go calls f in a new goroutine. If f returns an error, the
+// contextGroup is canceled.
+//
+// If f notices cg.Context() is done, it should abandon further work
+// and return. In this case, f's return value will be ignored.
+func (cg *contextGroup) Go(f func() error) {
+ cg.mtx.Lock()
+ defer cg.mtx.Unlock()
+ if cg.err != nil {
+ return
+ }
+ cg.wg.Add(1)
+ go func() {
+ defer cg.wg.Done()
+ err := f()
+ cg.mtx.Lock()
+ defer cg.mtx.Unlock()
+ if err != nil && cg.err == nil {
+ cg.err = err
+ cg.cancel()
+ }
+ }()
+}
+
+// Wait waits for all added funcs to return, and returns the first
+// non-nil error.
+//
+// If the parent context is canceled before a func returns an error,
+// Wait returns the parent context's Err().
+//
+// Wait returns nil if all funcs return nil before the parent context
+// is canceled.
+func (cg *contextGroup) Wait() error {
+ cg.wg.Wait()
+ cg.mtx.Lock()
+ defer cg.mtx.Unlock()
+ if cg.err != nil {
+ return cg.err
+ }
+ return cg.ctx.Err()
+}
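contextGroup follows the same cancel-on-first-error pattern as the errgroup package in golang.org/x/sync; the main difference is that Wait here also reports the parent context's error when no added func failed. For comparison, an illustrative (not part of this change) version of the doc-comment example written with errgroup:

package main

import (
    "context"
    "errors"
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    g, ctx := errgroup.WithContext(context.Background())
    g.Go(func() error {
        <-ctx.Done() // canceled as soon as the other func returns an error
        return nil
    })
    g.Go(func() error {
        return errors.New("oops") // cancels ctx for the whole group
    })
    fmt.Println(g.Wait()) // prints "oops" after both goroutines end
}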
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 0a7f408f8..58482142f 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -554,29 +554,22 @@ func (dn *dirnode) Child(name string, replace func(inode) (inode, error)) (inode
// local persistent storage. Caller must have write lock on dn and the
// named children.
func (dn *dirnode) sync(ctx context.Context, names []string, throttle *throttle) error {
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
+ cg := newContextGroup(ctx)
+ defer cg.Cancel()
type shortBlock struct {
fn *filenode
idx int
}
- var pending []shortBlock
- var pendingLen int
- errors := make(chan error, 1)
- var wg sync.WaitGroup
- defer wg.Wait() // we have locks: unsafe to return until all goroutines finish
-
- flush := func(sbs []shortBlock) {
- defer wg.Done()
+ flush := func(sbs []shortBlock) error {
if len(sbs) == 0 {
- return
+ return nil
}
throttle.Acquire()
defer throttle.Release()
- if ctx.Err() != nil {
- return
+ if err := cg.Context().Err(); err != nil {
+ return err
}
block := make([]byte, 0, maxBlockSize)
for _, sb := range sbs {
@@ -584,11 +577,7 @@ func (dn *dirnode) sync(ctx context.Context, names []string, throttle *throttle)
}
locator, _, err := dn.fs.PutB(block)
if err != nil {
- select {
- case errors <- err:
- default:
- }
- cancel()
+ return err
}
off := 0
for _, sb := range sbs {
@@ -603,8 +592,17 @@ func (dn *dirnode) sync(ctx context.Context, names []string, throttle *throttle)
off += len(data)
sb.fn.memsize -= int64(len(data))
}
+ return nil
+ }
+
+ goFlush := func(sbs []shortBlock) {
+ cg.Go(func() error {
+ return flush(sbs)
+ })
}
+ var pending []shortBlock
+ var pendingLen int
localLocator := map[string]string{}
for _, name := range names {
fn, ok := dn.inodes[name].(*filenode)
@@ -627,13 +625,11 @@ func (dn *dirnode) sync(ctx context.Context, names []string, throttle *throttle)
fn.segments[idx] = seg
case *memSegment:
if seg.Len() > maxBlockSize/2 {
- wg.Add(1)
- go flush([]shortBlock{{fn, idx}})
+ goFlush([]shortBlock{{fn, idx}})
continue
}
if pendingLen+seg.Len() > maxBlockSize {
- wg.Add(1)
- go flush(pending)
+ goFlush(pending)
pending = nil
pendingLen = 0
}
@@ -644,19 +640,14 @@ func (dn *dirnode) sync(ctx context.Context, names []string, throttle *throttle)
}
}
}
- wg.Add(1)
- flush(pending)
- go func() {
- wg.Wait()
- close(errors)
- }()
- return <-errors
+ goFlush(pending)
+ return cg.Wait()
}
// caller must have write lock.
func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, throttle *throttle) (string, error) {
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
+ cg := newContextGroup(ctx)
+ defer cg.Cancel()
if len(dn.inodes) == 0 {
if prefix == "." {
@@ -690,27 +681,18 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, throttle
}
}
- var wg sync.WaitGroup
- errors := make(chan error, len(dirnames)+1)
subdirs := make([]string, len(dirnames))
rootdir := ""
for i, name := range dirnames {
- wg.Add(1)
- go func(i int, name string) {
- defer wg.Done()
- var err error
- subdirs[i], err = dn.inodes[name].(*dirnode).marshalManifest(ctx, prefix+"/"+name, throttle)
- if err != nil {
- errors <- err
- cancel()
- }
- }(i, name)
+ i, name := i, name
+ cg.Go(func() error {
+ txt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+"/"+name, throttle)
+ subdirs[i] = txt
+ return err
+ })
}
- wg.Add(1)
- go func() {
- defer wg.Done()
-
+ cg.Go(func() error {
var streamLen int64
type filepart struct {
name string
@@ -720,10 +702,8 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, throttle
var fileparts []filepart
var blocks []string
- if err := dn.sync(ctx, names, throttle); err != nil {
- errors <- err
- cancel()
- return
+ if err := dn.sync(cg.Context(), names, throttle); err != nil {
+ return err
}
for _, name := range filenames {
node := dn.inodes[name].(*filenode)
@@ -765,20 +745,15 @@ func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, throttle
filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
}
if len(filetokens) == 0 {
- return
+ return nil
} else if len(blocks) == 0 {
blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
}
rootdir = manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n"
- }()
-
- wg.Wait()
- select {
- case err := <-errors:
- return "", err
- default:
- }
- return rootdir + strings.Join(subdirs, ""), nil
+ return nil
+ })
+ err := cg.Wait()
+ return rootdir + strings.Join(subdirs, ""), err
}
func (dn *dirnode) loadManifest(txt string) error {
commit 80905cf49077ecf797f6d0ae3c375ca1a7f7df20
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date: Mon Nov 26 16:31:26 2018 -0500
14538: Use concurrent writers to sync multiple streams.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index afef1e391..0a7f408f8 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -5,6 +5,7 @@
package arvados
import (
+ "context"
"encoding/json"
"fmt"
"io"
@@ -138,7 +139,7 @@ func (fs *collectionFileSystem) Sync() error {
func (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {
fs.fileSystem.root.Lock()
defer fs.fileSystem.root.Unlock()
- return fs.fileSystem.root.(*dirnode).marshalManifest(prefix, newThrottle(concurrentWriters))
+ return fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix, newThrottle(concurrentWriters))
}
func (fs *collectionFileSystem) Size() int64 {
@@ -552,7 +553,10 @@ func (dn *dirnode) Child(name string, replace func(inode) (inode, error)) (inode
// children with the given names, which must be children of dn) to
// local persistent storage. Caller must have write lock on dn and the
// named children.
-func (dn *dirnode) sync(names []string, throttle *throttle) error {
+func (dn *dirnode) sync(ctx context.Context, names []string, throttle *throttle) error {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
type shortBlock struct {
fn *filenode
idx int
@@ -560,8 +564,10 @@ func (dn *dirnode) sync(names []string, throttle *throttle) error {
var pending []shortBlock
var pendingLen int
- var wg sync.WaitGroup
errors := make(chan error, 1)
+ var wg sync.WaitGroup
+ defer wg.Wait() // we have locks: unsafe to return until all goroutines finish
+
flush := func(sbs []shortBlock) {
defer wg.Done()
if len(sbs) == 0 {
@@ -569,6 +575,9 @@ func (dn *dirnode) sync(names []string, throttle *throttle) error {
}
throttle.Acquire()
defer throttle.Release()
+ if ctx.Err() != nil {
+ return
+ }
block := make([]byte, 0, maxBlockSize)
for _, sb := range sbs {
block = append(block, sb.fn.segments[sb.idx].(*memSegment).buf...)
@@ -579,6 +588,7 @@ func (dn *dirnode) sync(names []string, throttle *throttle) error {
case errors <- err:
default:
}
+ cancel()
}
off := 0
for _, sb := range sbs {
@@ -636,22 +646,17 @@ func (dn *dirnode) sync(names []string, throttle *throttle) error {
}
wg.Add(1)
flush(pending)
- wg.Wait()
- close(errors)
+ go func() {
+ wg.Wait()
+ close(errors)
+ }()
return <-errors
}
// caller must have write lock.
-func (dn *dirnode) marshalManifest(prefix string, throttle *throttle) (string, error) {
- var streamLen int64
- type filepart struct {
- name string
- offset int64
- length int64
- }
- var fileparts []filepart
- var subdirs string
- var blocks []string
+func (dn *dirnode) marshalManifest(ctx context.Context, prefix string, throttle *throttle) (string, error) {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
if len(dn.inodes) == 0 {
if prefix == "." {
@@ -669,26 +674,62 @@ func (dn *dirnode) marshalManifest(prefix string, throttle *throttle) (string, e
names = append(names, name)
}
sort.Strings(names)
+ var dirnames []string
+ var filenames []string
for _, name := range names {
node := dn.inodes[name]
node.Lock()
defer node.Unlock()
- }
- if err := dn.sync(names, throttle); err != nil {
- return "", err
- }
- for _, name := range names {
- switch node := dn.inodes[name].(type) {
+ switch node := node.(type) {
case *dirnode:
- subdir, err := node.marshalManifest(prefix+"/"+name, throttle)
+ dirnames = append(dirnames, name)
+ case *filenode:
+ filenames = append(filenames, name)
+ default:
+ panic(fmt.Sprintf("can't marshal inode type %T", node))
+ }
+ }
+
+ var wg sync.WaitGroup
+ errors := make(chan error, len(dirnames)+1)
+ subdirs := make([]string, len(dirnames))
+ rootdir := ""
+ for i, name := range dirnames {
+ wg.Add(1)
+ go func(i int, name string) {
+ defer wg.Done()
+ var err error
+ subdirs[i], err = dn.inodes[name].(*dirnode).marshalManifest(ctx, prefix+"/"+name, throttle)
if err != nil {
- return "", err
+ errors <- err
+ cancel()
}
- subdirs = subdirs + subdir
- case *filenode:
+ }(i, name)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ var streamLen int64
+ type filepart struct {
+ name string
+ offset int64
+ length int64
+ }
+
+ var fileparts []filepart
+ var blocks []string
+ if err := dn.sync(ctx, names, throttle); err != nil {
+ errors <- err
+ cancel()
+ return
+ }
+ for _, name := range filenames {
+ node := dn.inodes[name].(*filenode)
if len(node.segments) == 0 {
fileparts = append(fileparts, filepart{name: name})
- break
+ continue
}
for _, seg := range node.segments {
switch seg := seg.(type) {
@@ -718,20 +759,26 @@ func (dn *dirnode) marshalManifest(prefix string, throttle *throttle) (string, e
panic(fmt.Sprintf("can't marshal segment type %T", seg))
}
}
- default:
- panic(fmt.Sprintf("can't marshal inode type %T", node))
}
+ var filetokens []string
+ for _, s := range fileparts {
+ filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
+ }
+ if len(filetokens) == 0 {
+ return
+ } else if len(blocks) == 0 {
+ blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
+ }
+ rootdir = manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n"
+ }()
+
+ wg.Wait()
+ select {
+ case err := <-errors:
+ return "", err
+ default:
}
- var filetokens []string
- for _, s := range fileparts {
- filetokens = append(filetokens, fmt.Sprintf("%d:%d:%s", s.offset, s.length, manifestEscape(s.name)))
- }
- if len(filetokens) == 0 {
- return subdirs, nil
- } else if len(blocks) == 0 {
- blocks = []string{"d41d8cd98f00b204e9800998ecf8427e+0"}
- }
- return manifestEscape(prefix) + " " + strings.Join(blocks, " ") + " " + strings.Join(filetokens, " ") + "\n" + subdirs, nil
+ return rootdir + strings.Join(subdirs, ""), nil
}
func (dn *dirnode) loadManifest(txt string) error {
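The parallel section above gives each subdirectory its own goroutine; every goroutine writes only subdirs[i] for its own i, so the slice needs no extra locking, and a channel buffered to len(dirnames)+1 guarantees error sends never block. A standalone sketch of that fan-out pattern (simplified, not the Arvados code):

package main

import (
    "errors"
    "fmt"
    "strings"
    "sync"
)

// marshalAll runs marshal once per name, concurrently, and joins the results
// in the original order. The first error (if any) wins.
func marshalAll(names []string, marshal func(string) (string, error)) (string, error) {
    results := make([]string, len(names))
    errs := make(chan error, len(names)) // buffered so sends never block
    var wg sync.WaitGroup
    for i, name := range names {
        wg.Add(1)
        go func(i int, name string) {
            defer wg.Done()
            txt, err := marshal(name)
            if err != nil {
                errs <- err
                return
            }
            results[i] = txt // each goroutine writes a distinct index: no race
        }(i, name)
    }
    wg.Wait()
    select {
    case err := <-errs:
        return "", err
    default:
    }
    return strings.Join(results, ""), nil
}

func main() {
    out, err := marshalAll([]string{"a", "b", "c"}, func(name string) (string, error) {
        if name == "" {
            return "", errors.New("empty name")
        }
        return "./" + name + " [stream]\n", nil
    })
    fmt.Print(out)
    fmt.Println("err:", err)
}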
commit d9d145af78c61d900447434735cab7f3dc64fbdd
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date: Mon Nov 26 15:32:51 2018 -0500
14538: Use concurrent writers to sync a directory.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>
diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index b996542ab..afef1e391 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -20,6 +20,8 @@ import (
var maxBlockSize = 1 << 26
+var concurrentWriters = 4
+
// A CollectionFileSystem is a FileSystem that can be serialized as a
// manifest and stored as a collection.
type CollectionFileSystem interface {
@@ -136,7 +138,7 @@ func (fs *collectionFileSystem) Sync() error {
func (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {
fs.fileSystem.root.Lock()
defer fs.fileSystem.root.Unlock()
- return fs.fileSystem.root.(*dirnode).marshalManifest(prefix)
+ return fs.fileSystem.root.(*dirnode).marshalManifest(prefix, newThrottle(concurrentWriters))
}
func (fs *collectionFileSystem) Size() int64 {
@@ -550,7 +552,7 @@ func (dn *dirnode) Child(name string, replace func(inode) (inode, error)) (inode
// children with the given names, which must be children of dn) to
// local persistent storage. Caller must have write lock on dn and the
// named children.
-func (dn *dirnode) sync(names []string) error {
+func (dn *dirnode) sync(names []string, throttle *throttle) error {
type shortBlock struct {
fn *filenode
idx int
@@ -558,17 +560,25 @@ func (dn *dirnode) sync(names []string) error {
var pending []shortBlock
var pendingLen int
- flush := func(sbs []shortBlock) error {
+ var wg sync.WaitGroup
+ errors := make(chan error, 1)
+ flush := func(sbs []shortBlock) {
+ defer wg.Done()
if len(sbs) == 0 {
- return nil
+ return
}
+ throttle.Acquire()
+ defer throttle.Release()
block := make([]byte, 0, maxBlockSize)
for _, sb := range sbs {
block = append(block, sb.fn.segments[sb.idx].(*memSegment).buf...)
}
locator, _, err := dn.fs.PutB(block)
if err != nil {
- return err
+ select {
+ case errors <- err:
+ default:
+ }
}
off := 0
for _, sb := range sbs {
@@ -583,7 +593,6 @@ func (dn *dirnode) sync(names []string) error {
off += len(data)
sb.fn.memsize -= int64(len(data))
}
- return nil
}
localLocator := map[string]string{}
@@ -608,15 +617,13 @@ func (dn *dirnode) sync(names []string) error {
fn.segments[idx] = seg
case *memSegment:
if seg.Len() > maxBlockSize/2 {
- if err := flush([]shortBlock{{fn, idx}}); err != nil {
- return err
- }
+ wg.Add(1)
+ go flush([]shortBlock{{fn, idx}})
continue
}
if pendingLen+seg.Len() > maxBlockSize {
- if err := flush(pending); err != nil {
- return err
- }
+ wg.Add(1)
+ go flush(pending)
pending = nil
pendingLen = 0
}
@@ -627,11 +634,15 @@ func (dn *dirnode) sync(names []string) error {
}
}
}
- return flush(pending)
+ wg.Add(1)
+ flush(pending)
+ wg.Wait()
+ close(errors)
+ return <-errors
}
// caller must have write lock.
-func (dn *dirnode) marshalManifest(prefix string) (string, error) {
+func (dn *dirnode) marshalManifest(prefix string, throttle *throttle) (string, error) {
var streamLen int64
type filepart struct {
name string
@@ -663,13 +674,13 @@ func (dn *dirnode) marshalManifest(prefix string) (string, error) {
node.Lock()
defer node.Unlock()
}
- if err := dn.sync(names); err != nil {
+ if err := dn.sync(names, throttle); err != nil {
return "", err
}
for _, name := range names {
switch node := dn.inodes[name].(type) {
case *dirnode:
- subdir, err := node.marshalManifest(prefix + "/" + name)
+ subdir, err := node.marshalManifest(prefix+"/"+name, throttle)
if err != nil {
return "", err
}
diff --git a/sdk/go/arvados/throttle.go b/sdk/go/arvados/throttle.go
new file mode 100644
index 000000000..464b73b41
--- /dev/null
+++ b/sdk/go/arvados/throttle.go
@@ -0,0 +1,17 @@
+package arvados
+
+type throttle struct {
+ c chan struct{}
+}
+
+func newThrottle(n int) *throttle {
+ return &throttle{c: make(chan struct{}, n)}
+}
+
+func (t *throttle) Acquire() {
+ t.c <- struct{}{}
+}
+
+func (t *throttle) Release() {
+ <-t.c
+}
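throttle is a counting semaphore built on a buffered channel: Acquire blocks once n slots are taken, Release frees one. A minimal usage sketch (the sleep is a stand-in for a Keep block write; not Arvados code):

package main

import (
    "fmt"
    "sync"
    "time"
)

type throttle struct{ c chan struct{} }

func newThrottle(n int) *throttle { return &throttle{c: make(chan struct{}, n)} }
func (t *throttle) Acquire()      { t.c <- struct{}{} }
func (t *throttle) Release()      { <-t.c }

func main() {
    th := newThrottle(4) // matches concurrentWriters above
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            th.Acquire()
            defer th.Release()
            // At most 4 of these run concurrently.
            time.Sleep(10 * time.Millisecond)
            fmt.Println("flushed block", i)
        }(i)
    }
    wg.Wait()
}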
-----------------------------------------------------------------------
hooks/post-receive
--