[ARVADOS] created: 1.1.0-134-gda3f228

Git user git@public.curoverse.com
Fri Nov 10 16:34:50 EST 2017


        at  da3f22835804cc1bafe7daf373867ef46cbb20e8 (commit)


commit da3f22835804cc1bafe7daf373867ef46cbb20e8
Author: Tom Clegg <tclegg@veritasgenetics.com>
Date:   Fri Nov 10 16:34:28 2017 -0500

    12483: Add basic file writing tests.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg@veritasgenetics.com>

diff --git a/sdk/go/arvados/collection_fs_test.go b/sdk/go/arvados/collection_fs_test.go
index 7bd3570..605c41c 100644
--- a/sdk/go/arvados/collection_fs_test.go
+++ b/sdk/go/arvados/collection_fs_test.go
@@ -135,14 +135,80 @@ func (s *CollectionFSSuite) TestNotExist(c *check.C) {
 	}
 }
 
-func (s *CollectionFSSuite) TestOpenFile(c *check.C) {
-	c.Skip("cannot test files with nil keepclient")
+func (s *CollectionFSSuite) TestReadOnlyFile(c *check.C) {
+	f, err := s.fs.OpenFile("/dir1/foo", os.O_RDONLY, 0)
+	c.Assert(err, check.IsNil)
+	st, err := f.Stat()
+	c.Assert(err, check.IsNil)
+	c.Check(st.Size(), check.Equals, int64(3))
+	n, err := f.Write([]byte("bar"))
+	c.Check(n, check.Equals, 0)
+	c.Check(err, check.Equals, ErrReadOnlyFile)
+}
+
+func (s *CollectionFSSuite) TestCreateFile(c *check.C) {
+	f, err := s.fs.OpenFile("/newfile", os.O_RDWR|os.O_CREATE, 0)
+	c.Assert(err, check.IsNil)
+	st, err := f.Stat()
+	c.Assert(err, check.IsNil)
+	c.Check(st.Size(), check.Equals, int64(0))
+
+	n, err := f.Write([]byte("bar"))
+	c.Check(n, check.Equals, 3)
+	c.Check(err, check.IsNil)
+
+	c.Check(f.Close(), check.IsNil)
+
+	f, err = s.fs.OpenFile("/newfile", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0)
+	c.Check(f, check.IsNil)
+	c.Assert(err, check.NotNil)
+
+	f, err = s.fs.OpenFile("/newfile", os.O_RDWR, 0)
+	c.Assert(err, check.IsNil)
+	st, err = f.Stat()
+	c.Assert(err, check.IsNil)
+	c.Check(st.Size(), check.Equals, int64(3))
+
+	c.Check(f.Close(), check.IsNil)
+
+	// TODO: serialize to Collection, confirm manifest contents,
+	// make new FileSystem, confirm file contents.
+}
 
-	f, err := s.fs.Open("/foo.txt")
+func (s *CollectionFSSuite) TestReadWriteFile(c *check.C) {
+	f, err := s.fs.OpenFile("/dir1/foo", os.O_RDWR, 0)
 	c.Assert(err, check.IsNil)
 	st, err := f.Stat()
 	c.Assert(err, check.IsNil)
 	c.Check(st.Size(), check.Equals, int64(3))
+
+	buf := make([]byte, 4)
+	n, err := f.Read(buf)
+	c.Check(n, check.Equals, 3)
+	c.Check(err, check.Equals, io.EOF)
+	c.Check(string(buf[:3]), check.DeepEquals, "foo")
+
+	pos, err := f.Seek(-2, os.SEEK_CUR)
+	c.Check(pos, check.Equals, int64(1))
+	c.Check(err, check.IsNil)
+
+	n, err = f.Write([]byte("*"))
+	c.Check(n, check.Equals, 1)
+	c.Check(err, check.IsNil)
+
+	pos, err = f.Seek(0, os.SEEK_CUR)
+	c.Check(pos, check.Equals, int64(2))
+	c.Check(err, check.IsNil)
+
+	pos, err = f.Seek(0, os.SEEK_SET)
+	c.Check(pos, check.Equals, int64(0))
+	c.Check(err, check.IsNil)
+
+	buf = make([]byte, 4)
+	n, err = f.Read(buf)
+	c.Check(n, check.Equals, 3)
+	c.Check(err, check.Equals, io.EOF)
+	c.Check(string(buf[:3]), check.Equals, "f*o")
 }
 
 // Gocheck boilerplate
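
For reference, a minimal, self-contained sketch (not part of the patch) of how the collection filesystem exercised by these tests might be driven from outside the package, assuming the filesystem implementation from the second commit below. The stubKeep type and the manifest text are made up for illustration; only OpenFile and Read are exercised here:

    package main

    import (
        "fmt"
        "log"
        "os"

        "git.curoverse.com/arvados.git/sdk/go/arvados"
    )

    // stubKeep is an illustrative in-memory block store; any value with
    // this ReadAt method can be passed to Collection.FileSystem.
    type stubKeep struct{ blocks map[string][]byte }

    func (s stubKeep) ReadAt(locator string, p []byte, off int64) (int, error) {
        return copy(p, s.blocks[locator[:32]][off:]), nil
    }

    func main() {
        // One 6-byte block ("foobar") split into files foo and bar.
        coll := &arvados.Collection{
            ManifestText: ". 3858f62230ac3c915f300c664312c63f+6 0:3:foo 3:3:bar\n",
        }
        fs := coll.FileSystem(nil, stubKeep{blocks: map[string][]byte{
            "3858f62230ac3c915f300c664312c63f": []byte("foobar"),
        }})

        f, err := fs.OpenFile("/foo", os.O_RDONLY, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        buf := make([]byte, 3)
        n, _ := f.Read(buf)
        fmt.Println(string(buf[:n])) // "foo"
    }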

commit 3e3abb01d17b0968e22e6738da12c86ad0a2a06c
Author: Tom Clegg <tclegg@veritasgenetics.com>
Date:   Thu Nov 9 13:24:34 2017 -0500

    12483: Rewrite collection filesystem.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tclegg@veritasgenetics.com>

diff --git a/sdk/go/arvados/collection_fs.go b/sdk/go/arvados/collection_fs.go
index 1acf274..1dee6f1 100644
--- a/sdk/go/arvados/collection_fs.go
+++ b/sdk/go/arvados/collection_fs.go
@@ -5,268 +5,647 @@
 package arvados
 
 import (
+	"errors"
 	"io"
 	"net/http"
 	"os"
 	"path"
+	"regexp"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
+)
 
-	"git.curoverse.com/arvados.git/sdk/go/manifest"
+var (
+	ErrReadOnlyFile     = errors.New("read-only file")
+	ErrNegativeOffset   = errors.New("cannot seek to negative offset")
+	ErrFileExists       = errors.New("file exists")
+	ErrInvalidOperation = errors.New("invalid operation")
+	ErrPermission       = os.ErrPermission
 )
 
 type File interface {
 	io.Reader
+	io.Writer
 	io.Closer
 	io.Seeker
 	Size() int64
+	Readdir(int) ([]os.FileInfo, error)
+	Stat() (os.FileInfo, error)
 }
 
 type keepClient interface {
-	ManifestFileReader(manifest.Manifest, string) (File, error)
+	ReadAt(locator string, p []byte, off int64) (int, error)
 }
 
-type collectionFile struct {
-	File
-	collection *Collection
-	name       string
-	size       int64
+type fileinfo struct {
+	name    string
+	mode    os.FileMode
+	size    int64
+	modTime time.Time
 }
 
-func (cf *collectionFile) Size() int64 {
-	return cf.size
+// Name implements os.FileInfo.
+func (fi fileinfo) Name() string {
+	return fi.name
 }
 
-func (cf *collectionFile) Readdir(count int) ([]os.FileInfo, error) {
-	return nil, io.EOF
+// ModTime implements os.FileInfo.
+func (fi fileinfo) ModTime() time.Time {
+	return fi.modTime
 }
 
-func (cf *collectionFile) Stat() (os.FileInfo, error) {
-	return collectionDirent{
-		collection: cf.collection,
-		name:       cf.name,
-		size:       cf.size,
-		isDir:      false,
-	}, nil
+// Mode implements os.FileInfo.
+func (fi fileinfo) Mode() os.FileMode {
+	return fi.mode
 }
 
-type collectionDir struct {
-	collection *Collection
-	stream     string
-	dirents    []os.FileInfo
+// IsDir implements os.FileInfo.
+func (fi fileinfo) IsDir() bool {
+	return fi.mode&os.ModeDir != 0
 }
 
-// Readdir implements os.File.
-func (cd *collectionDir) Readdir(count int) ([]os.FileInfo, error) {
-	ret := cd.dirents
-	if count <= 0 {
-		cd.dirents = nil
-		return ret, nil
-	} else if len(ret) == 0 {
-		return nil, io.EOF
-	}
-	var err error
-	if count >= len(ret) {
-		count = len(ret)
-		err = io.EOF
-	}
-	cd.dirents = cd.dirents[count:]
-	return ret[:count], err
+// Size implements os.FileInfo.
+func (fi fileinfo) Size() int64 {
+	return fi.size
 }
 
-// Stat implements os.File.
-func (cd *collectionDir) Stat() (os.FileInfo, error) {
-	return collectionDirent{
-		collection: cd.collection,
-		name:       path.Base(cd.stream),
-		isDir:      true,
-		size:       int64(len(cd.dirents)),
-	}, nil
+// Sys implements os.FileInfo.
+func (fi fileinfo) Sys() interface{} {
+	return nil
 }
 
-// Close implements os.File.
-func (cd *collectionDir) Close() error {
-	return nil
+func (fi fileinfo) Stat() os.FileInfo {
+	return fi
 }
 
-// Read implements os.File.
-func (cd *collectionDir) Read([]byte) (int, error) {
-	return 0, nil
+// A CollectionFileSystem is an http.Filesystem plus Stat() and
+// support for opening writable files.
+type CollectionFileSystem interface {
+	http.FileSystem
+	Stat(name string) (os.FileInfo, error)
+	Create(name string) (File, error)
+	OpenFile(name string, flag int, perm os.FileMode) (File, error)
 }
 
-// Seek implements os.File.
-func (cd *collectionDir) Seek(int64, int) (int64, error) {
-	return 0, nil
+type fileSystem struct {
+	dirnode
 }
 
-// collectionDirent implements os.FileInfo.
-type collectionDirent struct {
-	collection *Collection
-	name       string
-	isDir      bool
-	mode       os.FileMode
-	size       int64
+func (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	return fs.dirnode.OpenFile(path.Clean(name), flag, perm)
 }
 
-// Name implements os.FileInfo.
-func (e collectionDirent) Name() string {
-	return e.name
+func (fs *fileSystem) Open(name string) (http.File, error) {
+	return fs.dirnode.OpenFile(path.Clean(name), os.O_RDONLY, 0)
 }
 
-// ModTime implements os.FileInfo.
-func (e collectionDirent) ModTime() time.Time {
-	if e.collection.ModifiedAt == nil {
-		return time.Now()
+func (fs *fileSystem) Create(name string) (File, error) {
+	return fs.dirnode.OpenFile(path.Clean(name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)
+}
+
+func (fs *fileSystem) Stat(name string) (os.FileInfo, error) {
+	f, err := fs.OpenFile(name, os.O_RDONLY, 0)
+	if err != nil {
+		return nil, err
 	}
-	return *e.collection.ModifiedAt
+	defer f.Close()
+	return f.Stat()
 }
 
-// Mode implements os.FileInfo.
-func (e collectionDirent) Mode() os.FileMode {
-	if e.isDir {
-		return 0555
-	} else {
-		return 0444
+type inode interface {
+	os.FileInfo
+	OpenFile(string, int, os.FileMode) (*file, error)
+	Parent() inode
+	Read([]byte, filenodePtr) (int, filenodePtr, error)
+	Write([]byte, filenodePtr) (int, filenodePtr, error)
+	Readdir() []os.FileInfo
+	Stat() os.FileInfo
+	sync.Locker
+	RLock()
+	RUnlock()
+}
+
+// filenode implements inode.
+type filenode struct {
+	fileinfo
+	parent   *dirnode
+	extents  []extent
+	repacked int64 // number of times anything in []extents has changed len
+	sync.RWMutex
+}
+
+// filenodePtr is an offset into a file that is (usually) efficient to
+// seek to. Specifically, if filenode.repacked==filenodePtr.repacked
+// then filenode.extents[filenodePtr.extentIdx][filenodePtr.extentOff]
+// corresponds to file offset filenodePtr.off. Otherwise, it is
+// necessary to reexamine len(filenode.extents[0]) etc. to find the
+// correct extent and offset.
+type filenodePtr struct {
+	off       int64
+	extentIdx int
+	extentOff int
+	repacked  int64
+}
+
+// seek returns a ptr that is consistent with both startPtr.off and
+// the current state of fn. The caller must already hold fn.RLock() or
+// fn.Lock().
+//
+// If startPtr points beyond the end of the file, ptr will point to
+// exactly the end of the file.
+//
+// After seeking:
+//
+//     ptr.extentIdx == len(filenode.extents) // i.e., at EOF
+//     ||
+//     filenode.extents[ptr.extentIdx].Len() >= ptr.extentOff
+func (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {
+	ptr = startPtr
+	if ptr.off < 0 {
+		// meaningless anyway
+		return
+	} else if ptr.off >= fn.fileinfo.size {
+		ptr.off = fn.fileinfo.size
+		ptr.extentIdx = len(fn.extents)
+		ptr.extentOff = 0
+		ptr.repacked = fn.repacked
+		return
+	} else if ptr.repacked == fn.repacked {
+		// extentIdx and extentOff accurately reflect ptr.off,
+		// but might have fallen off the end of an extent
+		if int64(ptr.extentOff) >= fn.extents[ptr.extentIdx].Len() {
+			ptr.extentIdx++
+			ptr.extentOff = 0
+		}
+		return
+	}
+	defer func() {
+		ptr.repacked = fn.repacked
+	}()
+	if ptr.off >= fn.fileinfo.size {
+		ptr.extentIdx, ptr.extentOff = len(fn.extents), 0
+		return
 	}
+	// Recompute extentIdx and extentOff.  We have already
+	// established fn.fileinfo.size > ptr.off >= 0, so we don't
+	// have to deal with edge cases here.
+	var off int64
+	for ptr.extentIdx, ptr.extentOff = 0, 0; off < ptr.off; ptr.extentIdx++ {
+		// This would panic (index out of range) if
+		// fn.fileinfo.size were larger than
+		// sum(fn.extents[i].Len()) -- but that can't happen
+		// because we have ensured fn.fileinfo.size is always
+		// accurate.
+		extLen := fn.extents[ptr.extentIdx].Len()
+		if off+extLen > ptr.off {
+			ptr.extentOff = int(ptr.off - off)
+			break
+		}
+		off += extLen
+	}
+	return
 }
 
-// IsDir implements os.FileInfo.
-func (e collectionDirent) IsDir() bool {
-	return e.isDir
+func (fn *filenode) appendExtent(e extent) {
+	fn.Lock()
+	defer fn.Unlock()
+	fn.extents = append(fn.extents, e)
+	fn.fileinfo.size += e.Len()
+	fn.repacked++
 }
 
-// Size implements os.FileInfo.
-func (e collectionDirent) Size() int64 {
-	return e.size
+func (fn *filenode) OpenFile(string, int, os.FileMode) (*file, error) {
+	return nil, os.ErrNotExist
 }
 
-// Sys implements os.FileInfo.
-func (e collectionDirent) Sys() interface{} {
+func (fn *filenode) Parent() inode {
+	return fn.parent
+}
+
+func (fn *filenode) Readdir() []os.FileInfo {
 	return nil
 }
 
-// A CollectionFileSystem is an http.Filesystem with an added Stat() method.
-type CollectionFileSystem interface {
-	http.FileSystem
-	Stat(name string) (os.FileInfo, error)
+func (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
+	fn.RLock()
+	defer fn.RUnlock()
+	ptr = fn.seek(startPtr)
+	if ptr.off < 0 {
+		err = ErrNegativeOffset
+		return
+	}
+	if ptr.extentIdx >= len(fn.extents) {
+		err = io.EOF
+		return
+	}
+	n, err = fn.extents[ptr.extentIdx].ReadAt(p, int64(ptr.extentOff))
+	if n > 0 {
+		ptr.off += int64(n)
+		ptr.extentOff += n
+		if int64(ptr.extentOff) == fn.extents[ptr.extentIdx].Len() {
+			ptr.extentIdx++
+			ptr.extentOff = 0
+			if ptr.extentIdx < len(fn.extents) && err == io.EOF {
+				err = nil
+			}
+		}
+	}
+	return
 }
 
-// collectionFS implements CollectionFileSystem.
-type collectionFS struct {
-	collection *Collection
-	client     *Client
-	kc         keepClient
-	sizes      map[string]int64
-	sizesOnce  sync.Once
+func (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {
+	fn.Lock()
+	defer fn.Unlock()
+	ptr = fn.seek(startPtr)
+	if ptr.off < 0 {
+		err = ErrNegativeOffset
+		return
+	}
+	err = ErrReadOnlyFile
+	return
 }
 
 // FileSystem returns a CollectionFileSystem for the collection.
 func (c *Collection) FileSystem(client *Client, kc keepClient) CollectionFileSystem {
-	return &collectionFS{
-		collection: c,
-		client:     client,
-		kc:         kc,
-	}
-}
-
-func (c *collectionFS) Stat(name string) (os.FileInfo, error) {
-	name = canonicalName(name)
-	if name == "." {
-		return collectionDirent{
-			collection: c.collection,
-			name:       "/",
-			isDir:      true,
-		}, nil
-	}
-	if size, ok := c.fileSizes()[name]; ok {
-		return collectionDirent{
-			collection: c.collection,
-			name:       path.Base(name),
-			size:       size,
-			isDir:      false,
-		}, nil
-	}
-	for fnm := range c.fileSizes() {
-		if !strings.HasPrefix(fnm, name+"/") {
-			continue
+	fs := &fileSystem{dirnode: dirnode{
+		cache:    &keepBlockCache{kc: kc},
+		client:   client,
+		kc:       kc,
+		fileinfo: fileinfo{name: ".", mode: os.ModeDir | 0755},
+		parent:   nil,
+		inodes:   make(map[string]inode),
+	}}
+	fs.dirnode.parent = &fs.dirnode
+	fs.dirnode.loadManifest(c.ManifestText)
+	return fs
+}
+
+type file struct {
+	inode
+	ptr        filenodePtr
+	append     bool
+	writable   bool
+	unreaddirs []os.FileInfo
+}
+
+func (f *file) Read(p []byte) (n int, err error) {
+	n, f.ptr, err = f.inode.Read(p, f.ptr)
+	return
+}
+
+func (f *file) Seek(off int64, whence int) (pos int64, err error) {
+	size := f.inode.Size()
+	ptr := f.ptr
+	switch whence {
+	case os.SEEK_SET:
+		ptr.off = off
+	case os.SEEK_CUR:
+		ptr.off += off
+	case os.SEEK_END:
+		ptr.off = size + off
+	}
+	if ptr.off < 0 {
+		return f.ptr.off, ErrNegativeOffset
+	}
+	if ptr.off > size {
+		ptr.off = size
+	}
+	if ptr.off != f.ptr.off {
+		f.ptr = ptr
+		// force filenode to recompute f.ptr fields on next
+		// use
+		f.ptr.repacked = -1
+	}
+	return f.ptr.off, nil
+}
+
+func (f *file) Write(p []byte) (n int, err error) {
+	n, f.ptr, err = f.inode.Write(p, f.ptr)
+	return
+}
+
+func (f *file) Readdir(count int) ([]os.FileInfo, error) {
+	if !f.inode.IsDir() {
+		return nil, ErrInvalidOperation
+	}
+	if count <= 0 {
+		return f.inode.Readdir(), nil
+	}
+	if f.unreaddirs == nil {
+		f.unreaddirs = f.inode.Readdir()
+	}
+	if len(f.unreaddirs) == 0 {
+		return nil, io.EOF
+	}
+	if count > len(f.unreaddirs) {
+		count = len(f.unreaddirs)
+	}
+	ret := f.unreaddirs[:count]
+	f.unreaddirs = f.unreaddirs[count:]
+	return ret, nil
+}
+
+func (f *file) Stat() (os.FileInfo, error) {
+	return f.inode, nil
+}
+
+func (f *file) Close() error {
+	// FIXME: flush
+	return nil
+}
+
+func (f *file) OpenFile(name string, flag int, perm os.FileMode) (*file, error) {
+	return f.inode.OpenFile(name, flag, perm)
+}
+
+type dirnode struct {
+	fileinfo
+	parent *dirnode
+	client *Client
+	kc     keepClient
+	cache  blockCache
+	inodes map[string]inode
+	sync.RWMutex
+}
+
+func (dn *dirnode) loadManifest(txt string) {
+	// FIXME: faster
+	var dirname string
+	for _, stream := range strings.Split(txt, "\n") {
+		var extents []storedExtent
+		for i, token := range strings.Split(stream, " ") {
+			if i == 0 {
+				dirname = manifestUnescape(token)
+				continue
+			}
+			if !strings.Contains(token, ":") {
+				toks := strings.SplitN(token, "+", 3)
+				if len(toks) < 2 {
+					// FIXME: broken
+					continue
+				}
+				length, err := strconv.ParseInt(toks[1], 10, 32)
+				if err != nil || length < 0 {
+					// FIXME: broken
+					continue
+				}
+				extents = append(extents, storedExtent{
+					locator: token,
+					offset:  0,
+					length:  int(length),
+				})
+				continue
+			}
+			toks := strings.Split(token, ":")
+			if len(toks) != 3 {
+				// FIXME: broken manifest
+				continue
+			}
+			offset, err := strconv.ParseInt(toks[0], 10, 64)
+			if err != nil || offset < 0 {
+				// FIXME: broken manifest
+				continue
+			}
+			length, err := strconv.ParseInt(toks[1], 10, 64)
+			if err != nil || length < 0 {
+				// FIXME: broken manifest
+				continue
+			}
+			name := path.Clean(dirname + "/" + manifestUnescape(toks[2]))
+			dn.makeParentDirs(name)
+			f, err := dn.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0700)
+			if err != nil {
+				// FIXME: broken
+				continue
+			}
+			if f.inode.Stat().IsDir() {
+				f.Close()
+				// FIXME: broken manifest
+				continue
+			}
+			var pos int64
+			for _, e := range extents {
+				if pos+e.Len() < offset {
+					pos += e.Len()
+					continue
+				}
+				if pos > offset+length {
+					break
+				}
+				var blkOff int
+				if pos < offset {
+					blkOff = int(offset - pos)
+				}
+				blkLen := int(e.Len()) - blkOff
+				if pos+int64(blkOff+blkLen) > offset+length {
+					blkLen = int(offset + length - pos - int64(blkOff))
+				}
+				f.inode.(*filenode).appendExtent(storedExtent{
+					cache:   dn.cache,
+					locator: e.locator,
+					offset:  blkOff,
+					length:  blkLen,
+				})
+				pos += e.Len()
+			}
+			f.Close()
 		}
-		return collectionDirent{
-			collection: c.collection,
-			name:       path.Base(name),
-			isDir:      true,
-		}, nil
 	}
-	return nil, os.ErrNotExist
 }
 
-func (c *collectionFS) Open(name string) (http.File, error) {
-	// Ensure name looks the way it does in a manifest.
-	name = canonicalName(name)
+func (dn *dirnode) makeParentDirs(name string) {
+	names := strings.Split(name, "/")
+	for _, name := range names[:len(names)-1] {
+		dn.Lock()
+		defer dn.Unlock()
+		if n, ok := dn.inodes[name]; !ok {
+			n := &dirnode{
+				parent: dn,
+				client: dn.client,
+				kc:     dn.kc,
+				fileinfo: fileinfo{
+					name: name,
+					mode: os.ModeDir | 0755,
+				},
+			}
+			if dn.inodes == nil {
+				dn.inodes = make(map[string]inode)
+			}
+			dn.inodes[name] = n
+			dn.fileinfo.size++
+			dn = n
+		} else if n, ok := n.(*dirnode); ok {
+			dn = n
+		} else {
+			// fail
+			return
+		}
+	}
+}
+
+func (dn *dirnode) Parent() inode {
+	return dn.parent
+}
+
+func (dn *dirnode) Readdir() (fi []os.FileInfo) {
+	dn.RLock()
+	defer dn.RUnlock()
+	fi = make([]os.FileInfo, 0, len(dn.inodes))
+	for _, inode := range dn.inodes {
+		fi = append(fi, inode.Stat())
+	}
+	return
+}
 
-	m := manifest.Manifest{Text: c.collection.ManifestText}
+func (dn *dirnode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
+	return 0, ptr, ErrInvalidOperation
+}
+
+func (dn *dirnode) Write(p []byte, ptr filenodePtr) (int, filenodePtr, error) {
+	return 0, ptr, ErrInvalidOperation
+}
 
-	// Return a file if it exists.
-	if size, ok := c.fileSizes()[name]; ok {
-		reader, err := c.kc.ManifestFileReader(m, name)
+func (dn *dirnode) OpenFile(name string, flag int, perm os.FileMode) (*file, error) {
+	name = strings.TrimSuffix(name, "/")
+	if name == "." || name == "" {
+		return &file{inode: dn}, nil
+	}
+	if dirname, name := path.Split(name); dirname != "" {
+		// OpenFile("foo/bar/baz") =>
+		// OpenFile("foo/bar").OpenFile("baz") (or
+		// ErrNotExist, if foo/bar is a file)
+		f, err := dn.OpenFile(dirname, os.O_RDONLY, 0)
 		if err != nil {
 			return nil, err
 		}
-		return &collectionFile{
-			File:       reader,
-			collection: c.collection,
-			name:       path.Base(name),
-			size:       size,
-		}, nil
-	}
-
-	// Return a directory if it's the root dir or there are file
-	// entries below it.
-	children := map[string]collectionDirent{}
-	for fnm, size := range c.fileSizes() {
-		if !strings.HasPrefix(fnm, name+"/") {
-			continue
+		defer f.Close()
+		if dn, ok := f.inode.(*dirnode); ok {
+			return dn.OpenFile(name, flag, perm)
+		} else {
+			return nil, os.ErrNotExist
+		}
+	}
+	dn.Lock()
+	defer dn.Unlock()
+	if name == ".." {
+		return &file{inode: dn.parent}, nil
+	}
+	n, ok := dn.inodes[name]
+	if !ok {
+		if flag&os.O_CREATE == 0 {
+			return nil, os.ErrNotExist
+		}
+		n = &filenode{
+			parent: dn,
+			fileinfo: fileinfo{
+				name: name,
+				mode: 0755,
+			},
 		}
-		isDir := false
-		ent := fnm[len(name)+1:]
-		if i := strings.Index(ent, "/"); i >= 0 {
-			ent = ent[:i]
-			isDir = true
+		if dn.inodes == nil {
+			dn.inodes = make(map[string]inode)
 		}
-		e := children[ent]
-		e.collection = c.collection
-		e.isDir = isDir
-		e.name = ent
-		e.size = size
-		children[ent] = e
-	}
-	if len(children) == 0 && name != "." {
-		return nil, os.ErrNotExist
-	}
-	dirents := make([]os.FileInfo, 0, len(children))
-	for _, ent := range children {
-		dirents = append(dirents, ent)
-	}
-	return &collectionDir{
-		collection: c.collection,
-		stream:     name,
-		dirents:    dirents,
+		dn.inodes[name] = n
+		dn.fileinfo.size++
+	} else if flag&os.O_EXCL != 0 {
+		return nil, ErrFileExists
+	}
+	return &file{
+		inode:    n,
+		append:   flag&os.O_APPEND != 0,
+		writable: flag&(os.O_WRONLY|os.O_RDWR) != 0,
 	}, nil
 }
 
-// fileSizes returns a map of files that can be opened. Each key
-// starts with "./".
-func (c *collectionFS) fileSizes() map[string]int64 {
-	c.sizesOnce.Do(func() {
-		c.sizes = map[string]int64{}
-		m := manifest.Manifest{Text: c.collection.ManifestText}
-		for ms := range m.StreamIter() {
-			for _, fss := range ms.FileStreamSegments {
-				c.sizes[ms.StreamName+"/"+fss.Name] += int64(fss.SegLen)
-			}
+type extent interface {
+	io.ReaderAt
+	Len() int64
+}
+
+type writableExtent interface {
+	extent
+	WriteAt(p []byte, off int)
+	Truncate(n int)
+}
+
+type memExtent struct {
+	buf []byte
+}
+
+func (me *memExtent) Len() int64 {
+	return int64(len(me.buf))
+}
+
+func (me *memExtent) Truncate(n int) {
+	if n > cap(me.buf) {
+		newsize := 1024
+		for newsize < n {
+			newsize = newsize << 2
+		}
+		newbuf := make([]byte, n, newsize)
+		copy(newbuf, me.buf)
+		me.buf = newbuf
+	}
+	me.buf = me.buf[:n]
+}
+
+func (me *memExtent) WriteAt(p []byte, off int) {
+	if off+len(p) > len(me.buf) {
+		panic("overflowed extent")
+	}
+	copy(me.buf[off:], p)
+}
+
+func (me *memExtent) ReadAt(p []byte, off int64) (n int, err error) {
+	if off > me.Len() {
+		err = io.EOF
+		return
+	}
+	n = copy(p, me.buf[int(off):])
+	if n < len(p) {
+		err = io.EOF
+	}
+	return
+}
+
+type storedExtent struct {
+	cache   blockCache
+	locator string
+	offset  int
+	length  int
+}
+
+func (se storedExtent) Len() int64 {
+	return int64(se.length)
+}
+
+func (se storedExtent) ReadAt(p []byte, off int64) (n int, err error) {
+	maxlen := int(int64(se.length) - int64(se.offset) - off)
+	if len(p) > maxlen {
+		p = p[:maxlen]
+		n, err = se.cache.ReadAt(se.locator, p, off+int64(se.offset))
+		if err == nil {
+			err = io.EOF
 		}
-	})
-	return c.sizes
+		return
+	}
+	return se.cache.ReadAt(se.locator, p, off+int64(se.offset))
+}
+
+type blockCache interface {
+	ReadAt(locator string, p []byte, off int64) (n int, err error)
+}
+
+type keepBlockCache struct {
+	kc keepClient
+}
+
+var scratch = make([]byte, 2<<26)
+
+func (kbc *keepBlockCache) ReadAt(locator string, p []byte, off int64) (int, error) {
+	return kbc.kc.ReadAt(locator, p, off)
 }
 
 func canonicalName(name string) string {
@@ -278,3 +657,21 @@ func canonicalName(name string) string {
 	}
 	return name
 }
+
+var manifestEscapeSeq = regexp.MustCompile(`\\([0-9]{3}|\\)`)
+
+func manifestUnescapeSeq(seq string) string {
+	if seq == `\\` {
+		return `\`
+	}
+	i, err := strconv.ParseUint(seq[1:], 8, 8)
+	if err != nil {
+		// Invalid escape sequence: can't unescape.
+		return seq
+	}
+	return string([]byte{byte(i)})
+}
+
+func manifestUnescape(s string) string {
+	return manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeSeq)
+}
diff --git a/sdk/go/arvados/collection_fs_test.go b/sdk/go/arvados/collection_fs_test.go
index f51d1eb..7bd3570 100644
--- a/sdk/go/arvados/collection_fs_test.go
+++ b/sdk/go/arvados/collection_fs_test.go
@@ -16,17 +16,39 @@ import (
 
 var _ = check.Suite(&CollectionFSSuite{})
 
+type keepClientStub struct {
+	blocks map[string][]byte
+}
+
+func (kcs *keepClientStub) ReadAt(locator string, p []byte, off int64) (int, error) {
+	buf := kcs.blocks[locator[:32]]
+	if buf == nil {
+		return 0, os.ErrNotExist
+	}
+	return copy(p, buf[int(off):]), nil
+}
+
 type CollectionFSSuite struct {
 	client *Client
 	coll   Collection
-	fs     http.FileSystem
+	fs     CollectionFileSystem
+	kc     keepClient
 }
 
 func (s *CollectionFSSuite) SetUpTest(c *check.C) {
 	s.client = NewClientFromEnv()
 	err := s.client.RequestAndDecode(&s.coll, "GET", "arvados/v1/collections/"+arvadostest.FooAndBarFilesInDirUUID, nil, nil)
 	c.Assert(err, check.IsNil)
-	s.fs = s.coll.FileSystem(s.client, nil)
+	s.kc = &keepClientStub{
+		blocks: map[string][]byte{
+			"3858f62230ac3c915f300c664312c63f": []byte("foobar"),
+		}}
+	s.fs = s.coll.FileSystem(s.client, s.kc)
+}
+
+func (s *CollectionFSSuite) TestHttpFileSystemInterface(c *check.C) {
+	_, ok := s.fs.(http.FileSystem)
+	c.Check(ok, check.Equals, true)
 }
 
 func (s *CollectionFSSuite) TestReaddirFull(c *check.C) {
@@ -58,7 +80,7 @@ func (s *CollectionFSSuite) TestReaddirLimited(c *check.C) {
 	}
 
 	fis, err = f.Readdir(1)
-	c.Check(err, check.Equals, io.EOF)
+	c.Check(err, check.IsNil)
 	c.Check(len(fis), check.Equals, 1)
 	if len(fis) > 0 {
 		c.Check(fis[0].Size(), check.Equals, int64(3))
@@ -76,7 +98,7 @@ func (s *CollectionFSSuite) TestReaddirLimited(c *check.C) {
 	c.Assert(err, check.IsNil)
 	fis, err = f.Readdir(2)
 	c.Check(len(fis), check.Equals, 1)
-	c.Assert(err, check.Equals, io.EOF)
+	c.Assert(err, check.IsNil)
 	fis, err = f.Readdir(2)
 	c.Check(len(fis), check.Equals, 0)
 	c.Assert(err, check.Equals, io.EOF)
diff --git a/sdk/go/keepclient/block_cache.go b/sdk/go/keepclient/block_cache.go
index e841a00..c1138fa 100644
--- a/sdk/go/keepclient/block_cache.go
+++ b/sdk/go/keepclient/block_cache.go
@@ -49,6 +49,19 @@ func (c *BlockCache) Sweep() {
 	}
 }
 
+// ReadAt returns data from the cache, first retrieving it from Keep if
+// necessary.
+func (c *BlockCache) ReadAt(kc *KeepClient, locator string, p []byte, off int64) (int, error) {
+	buf, err := c.Get(kc, locator)
+	if err != nil {
+		return 0, err
+	}
+	if off > int64(len(buf)) {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return copy(p, buf[int(off):]), nil
+}
+
 // Get returns data from the cache, first retrieving it from Keep if
 // necessary.
 func (c *BlockCache) Get(kc *KeepClient, locator string) ([]byte, error) {
diff --git a/sdk/go/keepclient/collectionreader.go b/sdk/go/keepclient/collectionreader.go
index 57829aa..3f39aff 100644
--- a/sdk/go/keepclient/collectionreader.go
+++ b/sdk/go/keepclient/collectionreader.go
@@ -43,14 +43,7 @@ func (kc *KeepClient) CollectionFileReader(collection map[string]interface{}, fi
 }
 
 func (kc *KeepClient) ManifestFileReader(m manifest.Manifest, filename string) (arvados.File, error) {
-	f := &file{
-		kc: kc,
-	}
-	err := f.load(m, filename)
-	if err != nil {
-		return nil, err
-	}
-	return f, nil
+	return (&arvados.Collection{ManifestText: m.Text}).FileSystem(nil, kc).OpenFile(filename, os.O_RDONLY, 0)
 }
 
 type file struct {
diff --git a/sdk/go/keepclient/keepclient.go b/sdk/go/keepclient/keepclient.go
index cbfad81..c3d63ed 100644
--- a/sdk/go/keepclient/keepclient.go
+++ b/sdk/go/keepclient/keepclient.go
@@ -292,6 +292,12 @@ func (kc *KeepClient) Get(locator string) (io.ReadCloser, int64, string, error)
 	return kc.getOrHead("GET", locator)
 }
 
+// ReadAt() retrieves a portion of block from the cache if it's
+// present, otherwise from the network.
+func (kc *KeepClient) ReadAt(locator string, p []byte, off int64) (int, error) {
+	return kc.cache().ReadAt(kc, locator, p, off)
+}
+
 // Ask() verifies that a block with the given hash is available and
 // readable, according to at least one Keep service. Unlike Get, it
 // does not retrieve the data or verify that the data content matches
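
For reference (not part of the patch), the blkOff/blkLen arithmetic in loadManifest above maps each manifest file segment (offset:length:name) onto slices of the stored blocks. A stand-alone sketch of the same arithmetic, with made-up block sizes:

    package main

    import "fmt"

    func main() {
        blockLens := []int64{4, 4, 4}        // three stored blocks, 12 bytes of stream
        offset, length := int64(3), int64(6) // file segment: stream bytes 3..8

        var pos int64
        for i, blkLen := range blockLens {
            if pos+blkLen < offset { // block ends before the segment starts
                pos += blkLen
                continue
            }
            if pos > offset+length { // block starts after the segment ends
                break
            }
            var blkOff int64
            if pos < offset {
                blkOff = offset - pos // skip the leading part of the block
            }
            useLen := blkLen - blkOff
            if pos+blkOff+useLen > offset+length {
                useLen = offset + length - pos - blkOff // trim the trailing part
            }
            fmt.Printf("block %d: bytes [%d, %d)\n", i, blkOff, blkOff+useLen)
            pos += blkLen
        }
        // Output: block 0 contributes 1 byte, block 1 all 4, block 2 1 byte.
    }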

-----------------------------------------------------------------------


hooks/post-receive
-- 



