[ARVADOS] updated: 1.3.0-1722-gbf08477c7

Git user git at public.curoverse.com
Tue Oct 22 19:32:56 UTC 2019


Summary of changes:
 sdk/go/arvados/fs_collection.go      | 36 +++++++++++++------
 sdk/go/arvados/fs_collection_test.go | 69 ++++++++++++++++++++++++++++++++++++
 2 files changed, 94 insertions(+), 11 deletions(-)

       via  bf08477c7e766c7692731c08212c8d1c3c5628ea (commit)
       via  a6cba47c47dcd6a7f0bce99de9bbed6a87ef3102 (commit)
      from  0daa251cdddbef3db6a69b388170fdb2901964c6 (commit)

The revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.


commit bf08477c7e766c7692731c08212c8d1c3c5628ea
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Oct 22 15:31:12 2019 -0400

    15652: Eliminate a malloc when writing a one-segment block.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 00f7ee73e..b3e6aa96e 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -649,6 +649,9 @@ type fnSegmentRef struct {
 // storedSegments that reference the relevant portions of the new
 // block.
 //
+// bufsize is the total data size in refs. It is used to preallocate
+// the correct amount of memory when len(refs)>1.
+//
 // If sync is false, commitBlock returns right away, after starting a
 // goroutine to do the writes, reacquire the filenodes' locks, and
 // swap out the *memSegments. Some filenodes' segments might get
@@ -656,12 +659,15 @@ type fnSegmentRef struct {
 // won't replace them.
 //
 // Caller must have write lock.
-func (dn *dirnode) commitBlock(ctx context.Context, refs []fnSegmentRef, sync bool) error {
+func (dn *dirnode) commitBlock(ctx context.Context, refs []fnSegmentRef, bufsize int, sync bool) error {
+	if len(refs) == 0 {
+		return nil
+	}
 	if err := ctx.Err(); err != nil {
 		return err
 	}
 	done := make(chan struct{})
-	block := make([]byte, 0, maxBlockSize)
+	var block []byte
 	segs := make([]*memSegment, 0, len(refs))
 	offsets := make([]int, 0, len(refs)) // location of segment's data within block
 	for _, ref := range refs {
@@ -678,7 +684,13 @@ func (dn *dirnode) commitBlock(ctx context.Context, refs []fnSegmentRef, sync bo
 		}
 		seg.flushing = done
 		offsets = append(offsets, len(block))
-		block = append(block, seg.buf...)
+		if len(refs) == 1 {
+			block = seg.buf
+		} else if block == nil {
+			block = append(make([]byte, 0, bufsize), seg.buf...)
+		} else {
+			block = append(block, seg.buf...)
+		}
 		segs = append(segs, seg)
 	}
 	dn.fs.throttle().Acquire()
@@ -769,12 +781,9 @@ func (dn *dirnode) flush(ctx context.Context, names []string, opts flushOpts) er
 	cg := newContextGroup(ctx)
 	defer cg.Cancel()
 
-	goCommit := func(refs []fnSegmentRef) {
-		if len(refs) == 0 {
-			return
-		}
+	goCommit := func(refs []fnSegmentRef, bufsize int) {
 		cg.Go(func() error {
-			return dn.commitBlock(cg.Context(), refs, opts.sync)
+			return dn.commitBlock(cg.Context(), refs, bufsize, opts.sync)
 		})
 	}
 
@@ -808,11 +817,11 @@ func (dn *dirnode) flush(ctx context.Context, names []string, opts flushOpts) er
 					node.segments[idx] = seg
 				case *memSegment:
 					if seg.Len() > maxBlockSize/2 {
-						goCommit([]fnSegmentRef{{node, idx}})
+						goCommit([]fnSegmentRef{{node, idx}}, seg.Len())
 						continue
 					}
 					if pendingLen+seg.Len() > maxBlockSize {
-						goCommit(pending)
+						goCommit(pending, pendingLen)
 						pending = nil
 						pendingLen = 0
 					}
@@ -825,7 +834,7 @@ func (dn *dirnode) flush(ctx context.Context, names []string, opts flushOpts) er
 		}
 	}
 	if opts.shortBlocks {
-		goCommit(pending)
+		goCommit(pending, pendingLen)
 	}
 	return cg.Wait()
 }

commit a6cba47c47dcd6a7f0bce99de9bbed6a87ef3102
Author: Tom Clegg <tclegg at veritasgenetics.com>
Date:   Tue Oct 22 15:30:58 2019 -0400

    15652: Add concurrency/memory test. Fix missing node lock in Flush.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg at veritasgenetics.com>

diff --git a/sdk/go/arvados/fs_collection.go b/sdk/go/arvados/fs_collection.go
index 00eee7405..00f7ee73e 100644
--- a/sdk/go/arvados/fs_collection.go
+++ b/sdk/go/arvados/fs_collection.go
@@ -170,6 +170,11 @@ func (fs *collectionFileSystem) Flush(path string, shortBlocks bool) error {
 		}
 		names = filenames
 	}
+	for _, name := range names {
+		child := dn.inodes[name]
+		child.Lock()
+		defer child.Unlock()
+	}
 	return dn.flush(context.TODO(), names, flushOpts{sync: false, shortBlocks: shortBlocks})
 }
 
diff --git a/sdk/go/arvados/fs_collection_test.go b/sdk/go/arvados/fs_collection_test.go
index 5f8d67510..352b226bf 100644
--- a/sdk/go/arvados/fs_collection_test.go
+++ b/sdk/go/arvados/fs_collection_test.go
@@ -1211,6 +1211,75 @@ func (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {
 	waitForFlush(0, nDirs*67<<20)
 }
 
+// Even when writing lots of files/dirs from different goroutines, as
+// long as Flush(dir,false) is called after writing each file,
+// unflushed data should be limited to one full block per
+// concurrentWriter, plus one nearly-full block at the end of each
+// dir/stream.
+func (s *CollectionFSSuite) TestMaxUnflushed(c *check.C) {
+	nDirs := int64(8)
+	maxUnflushed := (int64(concurrentWriters) + nDirs) << 26
+
+	fs, err := (&Collection{}).FileSystem(s.client, s.kc)
+	c.Assert(err, check.IsNil)
+
+	release := make(chan struct{})
+	timeout := make(chan struct{})
+	time.AfterFunc(10*time.Second, func() { close(timeout) })
+	var putCount, concurrency int64
+	var unflushed int64
+	s.kc.onPut = func(p []byte) {
+		defer atomic.AddInt64(&unflushed, -int64(len(p)))
+		cur := atomic.AddInt64(&concurrency, 1)
+		defer atomic.AddInt64(&concurrency, -1)
+		pc := atomic.AddInt64(&putCount, 1)
+		if pc < int64(concurrentWriters) {
+			// Block until we reach concurrentWriters, to
+			// make sure we're really accepting concurrent
+			// writes.
+			select {
+			case <-release:
+			case <-timeout:
+				c.Error("timeout")
+			}
+		} else if pc == int64(concurrentWriters) {
+			// Unblock the first N-1 PUT reqs.
+			close(release)
+		}
+		c.Assert(cur <= int64(concurrentWriters), check.Equals, true)
+		c.Assert(atomic.LoadInt64(&unflushed) <= maxUnflushed, check.Equals, true)
+	}
+
+	var owg sync.WaitGroup
+	megabyte := make([]byte, 1<<20)
+	for i := int64(0); i < nDirs; i++ {
+		dir := fmt.Sprintf("dir%d", i)
+		fs.Mkdir(dir, 0755)
+		owg.Add(1)
+		go func() {
+			defer owg.Done()
+			defer fs.Flush(dir, true)
+			var iwg sync.WaitGroup
+			defer iwg.Wait()
+			for j := 0; j < 67; j++ {
+				iwg.Add(1)
+				go func(j int) {
+					defer iwg.Done()
+					f, err := fs.OpenFile(fmt.Sprintf("%s/file%d", dir, j), os.O_WRONLY|os.O_CREATE, 0)
+					c.Assert(err, check.IsNil)
+					defer f.Close()
+					n, err := f.Write(megabyte)
+					c.Assert(err, check.IsNil)
+					atomic.AddInt64(&unflushed, int64(n))
+					fs.Flush(dir, false)
+				}(j)
+			}
+		}()
+	}
+	owg.Wait()
+	fs.Flush("", true)
+}
+
 func (s *CollectionFSSuite) TestBrokenManifests(c *check.C) {
 	for _, txt := range []string{
 		"\n",

-----------------------------------------------------------------------


hooks/post-receive
-- 




More information about the arvados-commits mailing list