[ARVADOS] updated: a1fc5b9e889f8359a32470c3a7d91190d0894899

git@public.curoverse.com
Tue Feb 24 17:11:33 EST 2015


Summary of changes:
 sdk/python/arvados/__init__.py             |   2 +-
 sdk/python/arvados/_ranges.py              |  22 +--
 sdk/python/arvados/{apisetup.py => api.py} |   0
 sdk/python/arvados/arvfile.py              |  92 ++++++------
 sdk/python/arvados/collection.py           | 226 ++++++++++++++---------------
 sdk/python/arvados/errors.py               |   9 +-
 sdk/python/arvados/keep.py                 |  37 +++--
 sdk/python/arvados/safeapi.py              |   4 +-
 sdk/python/arvados/stream.py               |   4 -
 sdk/python/tests/arvados_testutil.py       |   3 -
 sdk/python/tests/test_arv_put.py           |  42 +++---
 sdk/python/tests/test_arvfile.py           |   4 +-
 sdk/python/tests/test_collections.py       |  34 ++---
 sdk/python/tests/test_keep_locator.py      |   6 +-
 sdk/python/tests/test_sdk.py               |   2 +-
 15 files changed, 237 insertions(+), 250 deletions(-)
 rename sdk/python/arvados/{apisetup.py => api.py} (100%)

       via  a1fc5b9e889f8359a32470c3a7d91190d0894899 (commit)
      from  ad69b48e324c3ce29a4d2c84732dfd3d0288ebb3 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.


commit a1fc5b9e889f8359a32470c3a7d91190d0894899
Author: Peter Amstutz <peter.amstutz@curoverse.com>
Date:   Tue Feb 24 17:13:33 2015 -0500

    4823: Remove sync_mode() from Collection in favor of writable() flag.
    Collection constructor raises ArgumentError() on bad manifest.  Fix
    assertEquals() -> assertEqual().
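
    A minimal sketch of the new API implied by the changes below (not part
    of the commit itself, and assuming a configured Arvados client
    environment): a Collection is always writable, a CollectionReader is
    writable only while it is being initialized, and a bad manifest now
    fails in the constructor rather than on first use:

        from arvados.collection import Collection, CollectionReader
        from arvados import errors

        c = Collection()                      # empty, writable root collection
        assert c.writable()
        with c.open("count.txt", "w") as f:   # creating/writing files is allowed
            f.write("0123456789")

        r = CollectionReader(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n")
        assert not r.writable()               # read-only once initialized
        try:
            r.open("count1.txt", "w")         # write mode on a reader...
        except IOError:
            pass                              # ...raises IOError (EROFS)

        try:
            CollectionReader(". 0:43:md5sum.txt")   # malformed manifest...
        except errors.ArgumentError:
            pass                              # ...raised by the constructor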

diff --git a/sdk/python/arvados/__init__.py b/sdk/python/arvados/__init__.py
index 91fe460..8986e41 100644
--- a/sdk/python/arvados/__init__.py
+++ b/sdk/python/arvados/__init__.py
@@ -18,7 +18,7 @@ import fcntl
 import time
 import threading
 
-from apisetup import api, http_cache
+from .api import api, http_cache
 from collection import CollectionReader, CollectionWriter, ResumableCollectionWriter
 from keep import *
 from stream import *
diff --git a/sdk/python/arvados/_ranges.py b/sdk/python/arvados/_ranges.py
index 15f0173..6f22bad 100644
--- a/sdk/python/arvados/_ranges.py
+++ b/sdk/python/arvados/_ranges.py
@@ -10,7 +10,7 @@ class Range(object):
         self.segment_offset = segment_offset
 
     def __repr__(self):
-        return "Range(\"%s\", %i, %i, %i)" % (self.locator, self.range_start, self.range_size, self.segment_offset)
+        return "Range(%r, %r, %r, %r)" % (self.locator, self.range_start, self.range_size, self.segment_offset)
 
     def __eq__(self, other):
         return (self.locator == other.locator and
@@ -63,7 +63,7 @@ class LocatorAndRange(object):
                  self.segment_size == other.segment_size)
 
     def __repr__(self):
-        return "LocatorAndRange(\"%s\", %i, %i, %i)" % (self.locator, self.block_size, self.segment_offset, self.segment_size)
+        return "LocatorAndRange(%r, %r, %r, %r)" % (self.locator, self.block_size, self.segment_offset, self.segment_size)
 
 def locators_and_ranges(data_locators, range_start, range_size):
     """Get blocks that are covered by the range and return list of LocatorAndRange
@@ -90,6 +90,8 @@ def locators_and_ranges(data_locators, range_start, range_size):
     if i is None:
         return []
 
+    # We should always start at the first segment due to the binary
+    # search.
     while i < len(data_locators):
         dl = data_locators[i]
         block_start = dl.range_start
@@ -100,13 +102,6 @@ def locators_and_ranges(data_locators, range_start, range_size):
             # range ends before this block starts, so don't look at any more locators
             break
 
-        #if range_start >= block_end:
-            # Range starts after this block ends, so go to next block.
-            # We should always start at the first block due to the binary
-            # search above, so this test is unnecessary but useful to help
-            # document the algorithm.
-            #next
-
         if range_start >= block_start and range_end <= block_end:
             # range starts and ends in this block
             resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), range_size))
@@ -170,6 +165,8 @@ def replace_range(data_locators, new_range_start, new_range_size, new_locator, n
     if i is None:
         return
 
+    # We should always start at the first segment due to the binary
+    # search.
     while i < len(data_locators):
         dl = data_locators[i]
         old_segment_start = dl.range_start
@@ -179,13 +176,6 @@ def replace_range(data_locators, new_range_start, new_range_size, new_locator, n
             # range ends before this segment starts, so don't look at any more locators
             break
 
-        #if range_start >= old_segment_end:
-            # Range starts after this segment ends, so go to next segment.
-            # We should always start at the first segment due to the binary
-            # search above, so this test is unnecessary but useful to help
-            # document the algorithm.
-            #next
-
         if  old_segment_start <= new_range_start and new_range_end <= old_segment_end:
             # new range starts and ends in old segment
             # split segment into up to 3 pieces
diff --git a/sdk/python/arvados/apisetup.py b/sdk/python/arvados/api.py
similarity index 100%
rename from sdk/python/arvados/apisetup.py
rename to sdk/python/arvados/api.py
diff --git a/sdk/python/arvados/arvfile.py b/sdk/python/arvados/arvfile.py
index d9d9cd2..c4bbf64 100644
--- a/sdk/python/arvados/arvfile.py
+++ b/sdk/python/arvados/arvfile.py
@@ -6,7 +6,6 @@ from ._ranges import locators_and_ranges, replace_range, Range
 from arvados.retry import retry_method
 import config
 import hashlib
-import hashlib
 import threading
 import Queue
 import copy
@@ -16,10 +15,11 @@ from .keep import KeepLocator
 from _normalize_stream import normalize_stream
 
 def split(path):
-    """Separate the stream name and file name in a /-separated stream path and
-    return a tuple (stream_name, file_name).
+    """split(path) -> streamname, filename
 
-    If no stream name is available, assume '.'.
+    Separate the stream name and file name in a /-separated stream path and
+    return a tuple (stream_name, file_name).  If no stream name is available,
+    assume '.'.
 
     """
     try:
@@ -58,15 +58,8 @@ class _FileLikeObjectBase(object):
 
 
 class ArvadosFileReaderBase(_FileLikeObjectBase):
-    class _NameAttribute(str):
-        # The Python file API provides a plain .name attribute.
-        # Older SDK provided a name() method.
-        # This class provides both, for maximum compatibility.
-        def __call__(self):
-            return self
-
     def __init__(self, name, mode, num_retries=None):
-        super(ArvadosFileReaderBase, self).__init__(self._NameAttribute(name), mode)
+        super(ArvadosFileReaderBase, self).__init__(name, mode)
         self._filepos = 0L
         self.num_retries = num_retries
         self._readline_cache = (None, None)
@@ -171,8 +164,15 @@ class ArvadosFileReaderBase(_FileLikeObjectBase):
 
 
 class StreamFileReader(ArvadosFileReaderBase):
+    class _NameAttribute(str):
+        # The Python file API provides a plain .name attribute.
+        # Older SDK provided a name() method.
+        # This class provides both, for maximum compatibility.
+        def __call__(self):
+            return self
+
     def __init__(self, stream, segments, name):
-        super(StreamFileReader, self).__init__(name, 'rb', num_retries=stream.num_retries)
+        super(StreamFileReader, self).__init__(self._NameAttribute(name), 'rb', num_retries=stream.num_retries)
         self._stream = stream
         self.segments = segments
 
@@ -194,7 +194,7 @@ class StreamFileReader(ArvadosFileReaderBase):
         available_chunks = locators_and_ranges(self.segments, self._filepos, size)
         if available_chunks:
             lr = available_chunks[0]
-            data = self._stream._readfrom(lr.locator+lr.segment_offset,
+            data = self._stream.readfrom(lr.locator+lr.segment_offset,
                                           lr.segment_size,
                                           num_retries=num_retries)
 
@@ -210,7 +210,7 @@ class StreamFileReader(ArvadosFileReaderBase):
 
         data = []
         for lr in locators_and_ranges(self.segments, start, size):
-            data.append(self._stream._readfrom(lr.locator+lr.segment_offset, lr.segment_size,
+            data.append(self._stream.readfrom(lr.locator+lr.segment_offset, lr.segment_size,
                                               num_retries=num_retries))
         return ''.join(data)
 
@@ -229,8 +229,7 @@ def synchronized(orig_func):
     return synchronized_wrapper
 
 class _BufferBlock(object):
-    """A BufferBlock is a stand-in for a Keep block that is in the process of being
-    written.
+    """A stand-in for a Keep block that is in the process of being written.
 
     Writers can append to it, get the size, and compute the Keep locator.
     There are three valid states:
@@ -321,6 +320,14 @@ class _BufferBlock(object):
             self._locator = "%s+%i" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())
         return self._locator
 
+    @synchronized
+    def clone(self, new_blockid, owner):
+        if self._state == _BufferBlock.COMMITTED:
+            raise AssertionError("Can only duplicate a writable or pending buffer block")
+        bufferblock = _BufferBlock(new_blockid, self.size(), owner)
+        bufferblock.append(self.buffer_view[0:self.size()])
+        return bufferblock
+
 
 class NoopLock(object):
     def __enter__(self):
@@ -335,22 +342,20 @@ class NoopLock(object):
     def release(self):
         pass
 
-SYNC_READONLY = 1
-SYNC_EXPLICIT = 2
-SYNC_LIVE = 3
-
 def must_be_writable(orig_func):
     @functools.wraps(orig_func)
     def must_be_writable_wrapper(self, *args, **kwargs):
-        if self.sync_mode() == SYNC_READONLY:
-            raise IOError((errno.EROFS, "Collection is read only"))
+        if not self.writable():
+            raise IOError((errno.EROFS, "Collection must be writable."))
         return orig_func(self, *args, **kwargs)
     return must_be_writable_wrapper
 
 
 class _BlockManager(object):
-    """BlockManager handles buffer blocks, background block uploads, and background
-    block prefetch for a Collection of ArvadosFiles.
+    """BlockManager handles buffer blocks.
+
+    Also handles background block uploads, and background block prefetch for a
+    Collection of ArvadosFiles.
 
     """
     def __init__(self, keep):
@@ -389,8 +394,7 @@ class _BlockManager(object):
 
     @synchronized
     def dup_block(self, block, owner):
-        """Create a new bufferblock in WRITABLE state, initialized with the content of
-        an existing bufferblock.
+        """Create a new bufferblock initialized with the content of an existing bufferblock.
 
         :block:
           the buffer block to copy.
@@ -400,12 +404,7 @@ class _BlockManager(object):
 
         """
         new_blockid = "bufferblock%i" % len(self._bufferblocks)
-        with block.lock:
-            if block._state == _BufferBlock.COMMITTED:
-                raise AssertionError("Can only duplicate a writable or pending buffer block")
-
-            bufferblock = _BufferBlock(new_blockid, block.size(), owner)
-            bufferblock.append(block.buffer_view[0:block.size()])
+        bufferblock = block.clone(new_blockid, owner)
         self._bufferblocks[bufferblock.blockid] = bufferblock
         return bufferblock
 
@@ -454,7 +453,6 @@ class _BlockManager(object):
                     bufferblock.set_state(_BufferBlock.COMMITTED, loc)
 
                 except Exception as e:
-                    print e
                     self._put_errors.put((bufferblock.locator(), e))
                 finally:
                     if self._put_queue is not None:
@@ -559,7 +557,7 @@ class _BlockManager(object):
                     if b is None:
                         return
                     self._keep.get(b)
-                except:
+                except Exception:
                     pass
 
         with self.lock:
@@ -577,7 +575,9 @@ class _BlockManager(object):
 
 
 class ArvadosFile(object):
-    """ArvadosFile manages the underlying representation of a file in Keep as a
+    """Represent a file in a Collection.
+
+    ArvadosFile manages the underlying representation of a file in Keep as a
     sequence of segments spanning a set of blocks, and implements random
     read/write access.
 
@@ -603,8 +603,8 @@ class ArvadosFile(object):
             self._add_segment(stream, s.locator, s.range_size)
         self._current_bblock = None
 
-    def sync_mode(self):
-        return self.parent.sync_mode()
+    def writable(self):
+        return self.parent.writable()
 
     @synchronized
     def segments(self):
@@ -734,8 +734,7 @@ class ArvadosFile(object):
         return ''.join(data)
 
     def _repack_writes(self):
-        """Test if the buffer block has more data than is referenced by actual
-        segments.
+        """Test if the buffer block has more data than actual segments.
 
         This happens when a buffered write over-writes a file range written in
         a previous buffered write.  Re-pack the buffer block for efficiency
@@ -795,8 +794,10 @@ class ArvadosFile(object):
     @must_be_writable
     @synchronized
     def add_segment(self, blocks, pos, size):
-        """Add a segment to the end of the file, with `pos` and `offset` referencing a
-        section of the stream described by `blocks` (a list of Range objects)
+        """Add a segment to the end of the file.
+
+    `pos` and `size` reference a section of the stream described by
+    `blocks` (a list of Range objects).
 
         """
         self._add_segment(blocks, pos, size)
@@ -832,8 +833,7 @@ class ArvadosFile(object):
                 loc = KeepLocator(loc).stripped()
             filestream.append(LocatorAndRange(loc, locator_block_size(loc),
                                  segment.segment_offset, segment.range_size))
-        stream[stream_name] = filestream
-        buf += ' '.join(normalize_stream(stream_name, stream))
+        buf += ' '.join(normalize_stream(stream_name, {stream_name: filestream}))
         buf += "\n"
         return buf
 
@@ -906,7 +906,3 @@ class ArvadosFileWriter(ArvadosFileReader):
         self.arvadosfile.truncate(size)
         if self._filepos > self.size():
             self._filepos = self.size()
-
-    def close(self):
-        if self.arvadosfile.parent.sync_mode() == SYNC_LIVE:
-            self.arvadosfile.parent.root_collection().save()
diff --git a/sdk/python/arvados/collection.py b/sdk/python/arvados/collection.py
index a77f309..12fa9ae 100644
--- a/sdk/python/arvados/collection.py
+++ b/sdk/python/arvados/collection.py
@@ -10,7 +10,7 @@ import threading
 from collections import deque
 from stat import *
 
-from .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, _BlockManager, synchronized, must_be_writable, SYNC_READONLY, SYNC_EXPLICIT, NoopLock
+from .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, _BlockManager, synchronized, must_be_writable, NoopLock
 from keep import KeepLocator, KeepClient
 from .stream import StreamReader
 from ._normalize_stream import normalize_stream
@@ -38,7 +38,8 @@ class CollectionBase(object):
         return self._keep_client
 
     def stripped_manifest(self):
-        """
+        """Get the manifest with locator hints stripped.
+
         Return the manifest for the current collection with all
         non-portable hints (i.e., permission signatures and other
         hints other than size hints) removed from the locators.
@@ -497,7 +498,7 @@ class SynchronizedCollectionBase(CollectionBase):
     def _my_block_manager(self):
         raise NotImplementedError()
 
-    def sync_mode(self):
+    def writable(self):
         raise NotImplementedError()
 
     def root_collection(self):
@@ -520,20 +521,18 @@ class SynchronizedCollectionBase(CollectionBase):
         the path.
 
         :create_type:
-          One of `arvado.collection.FILE` or
-          `arvado.collection.COLLECTION`.  If the path is not found, and value
+          One of `arvados.collection.FILE` or
+          `arvados.collection.COLLECTION`.  If the path is not found, and value
           of create_type is FILE then create and return a new ArvadosFile for
           the last path component.  If COLLECTION, then create and return a new
           Collection for the last path component.
 
         """
 
-        pathcomponents = path.split("/")
-
-        if pathcomponents and pathcomponents[0]:
+        pathcomponents = path.split("/", 1)
+        if pathcomponents[0]:
             item = self._items.get(pathcomponents[0])
             if len(pathcomponents) == 1:
-                # item must be a file
                 if item is None:
                     # create new file
                     if create_type == COLLECTION:
@@ -551,9 +550,8 @@ class SynchronizedCollectionBase(CollectionBase):
                     self._items[pathcomponents[0]] = item
                     self._modified = True
                     self.notify(ADD, self, pathcomponents[0], item)
-                del pathcomponents[0]
                 if isinstance(item, SynchronizedCollectionBase):
-                    return item.find_or_create("/".join(pathcomponents), create_type)
+                    return item.find_or_create(pathcomponents[1], create_type)
                 else:
                     raise IOError((errno.ENOTDIR, "Interior path components must be subcollection"))
         else:
@@ -567,21 +565,21 @@ class SynchronizedCollectionBase(CollectionBase):
         found.
 
         """
-        pathcomponents = path.split("/")
+        if not path:
+            raise errors.ArgumentError("Parameter 'path' must not be empty.")
 
-        if pathcomponents and pathcomponents[0]:
-            item = self._items.get(pathcomponents[0])
-            if len(pathcomponents) == 1:
-                # item must be a file
-                return item
-            else:
-                del pathcomponents[0]
-                if isinstance(item, SynchronizedCollectionBase):
-                    return item.find("/".join(pathcomponents))
-                else:
-                    raise IOError((errno.ENOTDIR, "Interior path components must be subcollection"))
+        pathcomponents = path.split("/", 1)
+        item = self._items.get(pathcomponents[0])
+        if len(pathcomponents) == 1:
+            return item
         else:
-            return self
+            if isinstance(item, SynchronizedCollectionBase):
+                if pathcomponents[1]:
+                    return item.find(pathcomponents[1])
+                else:
+                    return item
+            else:
+                raise IOError((errno.ENOTDIR, "Interior path components must be subcollection"))
 
     def mkdirs(path):
         """Recursive subcollection create.
@@ -615,7 +613,7 @@ class SynchronizedCollectionBase(CollectionBase):
             raise errors.ArgumentError("Bad mode '%s'" % mode)
         create = (mode != "r")
 
-        if create and self.sync_mode() == SYNC_READONLY:
+        if create and not self.writable():
             raise IOError((errno.EROFS, "Collection is read only"))
 
         if create:
@@ -640,8 +638,7 @@ class SynchronizedCollectionBase(CollectionBase):
 
     @synchronized
     def modified(self):
-        """Test if the collection (or any subcollection or file) has been modified
-        since it was created."""
+        """Test if the collection (or any subcollection or file) has been modified."""
         if self._modified:
             return True
         for k,v in self._items.items():
@@ -662,21 +659,17 @@ class SynchronizedCollectionBase(CollectionBase):
         return iter(self._items.keys())
 
     @synchronized
-    def iterkeys(self):
-        """Iterate over names of files and collections directly contained in this collection."""
-        return self._items.keys()
-
-    @synchronized
     def __getitem__(self, k):
-        """Get a file or collection that is directly contained by this collection.  If
-        you want to search a path, use `find()` instead.
+        """Get a file or collection that is directly contained by this collection.
+
+        If you want to search a path, use `find()` instead.
+
         """
         return self._items[k]
 
     @synchronized
     def __contains__(self, k):
-        """If there is a file or collection a directly contained by this collection
-        with name `k`."""
+        """Test if there is a file or collection a directly contained by this collection."""
         return k in self._items
 
     @synchronized
@@ -719,28 +712,27 @@ class SynchronizedCollectionBase(CollectionBase):
         :recursive:
           Specify whether to remove non-empty subcollections (True), or raise an error (False).
         """
-        pathcomponents = path.split("/")
 
-        if len(pathcomponents) > 0:
-            item = self._items.get(pathcomponents[0])
-            if item is None:
-                raise IOError((errno.ENOENT, "File not found"))
-            if len(pathcomponents) == 1:
-                if isinstance(self._items[pathcomponents[0]], SynchronizedCollectionBase) and len(self._items[pathcomponents[0]]) > 0 and not recursive:
-                    raise IOError((errno.ENOTEMPTY, "Subcollection not empty"))
-                deleteditem = self._items[pathcomponents[0]]
-                del self._items[pathcomponents[0]]
-                self._modified = True
-                self.notify(DEL, self, pathcomponents[0], deleteditem)
-            else:
-                del pathcomponents[0]
-                item.remove("/".join(pathcomponents))
-        else:
+        if not path:
+            raise errors.ArgumentError("Parameter 'path' must not be empty.")
+
+        pathcomponents = path.split("/", 1)
+        item = self._items.get(pathcomponents[0])
+        if item is None:
             raise IOError((errno.ENOENT, "File not found"))
+        if len(pathcomponents) == 1:
+            if isinstance(self._items[pathcomponents[0]], SynchronizedCollectionBase) and len(self._items[pathcomponents[0]]) > 0 and not recursive:
+                raise IOError((errno.ENOTEMPTY, "Subcollection not empty"))
+            deleteditem = self._items[pathcomponents[0]]
+            del self._items[pathcomponents[0]]
+            self._modified = True
+            self.notify(DEL, self, pathcomponents[0], deleteditem)
+        else:
+            item.remove(pathcomponents[1])
 
-    def _cloneinto(self, target):
-        for k,v in self._items.items():
-            target._items[k] = v.clone(target)
+    def _clonefrom(self, source):
+        for k,v in source.items():
+            self._items[k] = v.clone(self)
 
     def clone(self):
         raise NotImplementedError()
@@ -832,7 +824,7 @@ class SynchronizedCollectionBase(CollectionBase):
         if self.modified() or self._manifest_text is None or normalize:
             item  = self
             stream = {}
-            buf = ""
+            buf = []
             sorted_keys = sorted(item.keys())
             for filename in [s for s in sorted_keys if isinstance(item[s], ArvadosFile)]:
                 # Create a stream per file `k`
@@ -848,11 +840,10 @@ class SynchronizedCollectionBase(CollectionBase):
                                          segment.segment_offset, segment.range_size))
                 stream[filename] = filestream
             if stream:
-                buf += ' '.join(normalize_stream(stream_name, stream))
-                buf += "\n"
+                buf.append(" ".join(normalize_stream(stream_name, stream)) + "\n")
             for dirname in [s for s in sorted_keys if isinstance(item[s], SynchronizedCollectionBase)]:
-                buf += item[dirname].manifest_text(stream_name=os.path.join(stream_name, dirname), strip=strip)
-            return buf
+                buf.append(item[dirname].manifest_text(stream_name=os.path.join(stream_name, dirname), strip=strip))
+            return "".join(buf)
         else:
             if strip:
                 return self.stripped_manifest()
@@ -861,9 +852,10 @@ class SynchronizedCollectionBase(CollectionBase):
 
     @synchronized
     def diff(self, end_collection, prefix=".", holding_collection=None):
-        """
-        Generate list of add/modify/delete actions which, when given to `apply`, will
-        change `self` to match `end_collection`
+        """Generate list of add/modify/delete actions.
+
+    When given to `apply`, will change `self` to match `end_collection`.
+
         """
         changes = []
         if holding_collection is None:
@@ -953,10 +945,14 @@ class SynchronizedCollectionBase(CollectionBase):
 
 
 class Collection(SynchronizedCollectionBase):
-    """Represents the root of an Arvados Collection, which may be associated with
-    an API server Collection record.
+    """Represents the root of an Arvados Collection.
+
+    This class is threadsafe.  The root collection object, all subcollections
+    and files are protected by a single lock (i.e. each access locks the entire
+    collection).
 
     Brief summary of useful methods:
 
     :To read an existing file:
       `c.open("myfile", "r")`
@@ -982,9 +978,8 @@ class Collection(SynchronizedCollectionBase):
     :To merge remote changes into this object:
       `c.update()`
 
-    This class is threadsafe.  The root collection object, all subcollections
-    and files are protected by a single lock (i.e. each access locks the entire
-    collection).
+    Must be associated with an API server Collection record (during
+    initialization, or using `save_new`) to use `save` or `update`.
 
     """
 
@@ -1031,7 +1026,6 @@ class Collection(SynchronizedCollectionBase):
         self._manifest_text = None
         self._api_response = None
 
-        self._sync = SYNC_EXPLICIT
         self.lock = threading.RLock()
         self.callbacks = []
         self.events = None
@@ -1047,8 +1041,10 @@ class Collection(SynchronizedCollectionBase):
                 raise errors.ArgumentError(
                     "Argument to CollectionReader must be a manifest or a collection UUID")
 
-            self._populate()
-
+            try:
+                self._populate()
+            except (IOError, errors.SyntaxError) as e:
+                raise errors.ArgumentError("Error processing manifest text: %s", e)
 
     def root_collection(self):
         return self
@@ -1056,16 +1052,14 @@ class Collection(SynchronizedCollectionBase):
     def stream_name(self):
         return "."
 
-    def sync_mode(self):
-        return self._sync
+    def writable(self):
+        return True
 
     @synchronized
     @retry_method
     def update(self, other=None, num_retries=None):
-        """Fetch the latest collection record on the API server and merge it with the
-        current collection contents.
+        """Merge the latest collection on the API server with the current collection."""
 
-        """
         if other is None:
             if self._manifest_locator is None:
                 raise errors.ArgumentError("`other` is None but collection does not have a manifest_locator uuid")
@@ -1146,7 +1140,7 @@ class Collection(SynchronizedCollectionBase):
             error_via_keep = self._populate_from_keep()
         if self._manifest_text is None:
             # Nothing worked!
-            raise arvados.errors.NotFoundError(
+            raise errors.NotFoundError(
                 ("Failed to retrieve collection '{}' " +
                  "from either API server ({}) or Keep ({})."
                  ).format(
@@ -1166,8 +1160,9 @@ class Collection(SynchronizedCollectionBase):
 
     def __exit__(self, exc_type, exc_value, traceback):
         """Support scoped auto-commit in a with: block."""
-        if self._sync != SYNC_READONLY and self._has_collection_uuid():
-            self.save()
+        if exc_type is None:
+            if self.writable() and self._has_collection_uuid():
+                self.save()
         if self._block_manager is not None:
             self._block_manager.stop_threads()
 
@@ -1180,9 +1175,7 @@ class Collection(SynchronizedCollectionBase):
         else:
             newcollection = Collection(parent=new_parent, apiconfig=new_config)
 
-        newcollection._sync = None
-        self._cloneinto(newcollection)
-        newcollection._sync = SYNC_READONLY if readonly else SYNC_EXPLICIT
+        newcollection._clonefrom(self)
         return newcollection
 
     @synchronized
@@ -1220,7 +1213,9 @@ class Collection(SynchronizedCollectionBase):
     @synchronized
     @retry_method
     def save(self, merge=True, num_retries=None):
-        """Commit pending buffer blocks to Keep, merge with remote record (if
+        """Save collection to an existing collection record.
+
+        Commit pending buffer blocks to Keep, merge with remote record (if
         update=True), write the manifest to Keep, and update the collection
         record.
 
@@ -1255,9 +1250,10 @@ class Collection(SynchronizedCollectionBase):
     @synchronized
     @retry_method
     def save_new(self, name=None, create_collection_record=True, owner_uuid=None, ensure_unique_name=False, num_retries=None):
-        """Commit pending buffer blocks to Keep, write the manifest to Keep, and create
-        a new collection record (if create_collection_record True).
+        """Save collection to a new collection record.
 
+        Commit pending buffer blocks to Keep, write the manifest to Keep, and
+        create a new collection record (if create_collection_record True).
         After creating a new collection record, this Collection object will be
         associated with the new record used by `save()`.
 
@@ -1322,9 +1318,6 @@ class Collection(SynchronizedCollectionBase):
         if len(self) > 0:
             raise ArgumentError("Can only import manifest into an empty collection")
 
-        save_sync = self.sync_mode()
-        self._sync = None
-
         STREAM_NAME = 0
         BLOCKS = 1
         SEGMENTS = 2
@@ -1332,9 +1325,9 @@ class Collection(SynchronizedCollectionBase):
         stream_name = None
         state = STREAM_NAME
 
-        for n in re.finditer(r'(\S+)(\s+|$)', manifest_text):
-            tok = n.group(1)
-            sep = n.group(2)
+        for token_and_separator in re.finditer(r'(\S+)(\s+|$)', manifest_text):
+            tok = token_and_separator.group(1)
+            sep = token_and_separator.group(2)
 
             if state == STREAM_NAME:
                 # starting a new stream
@@ -1346,24 +1339,24 @@ class Collection(SynchronizedCollectionBase):
                 continue
 
             if state == BLOCKS:
-                s = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', tok)
-                if s:
-                    blocksize = long(s.group(1))
+                block_locator = re.match(r'[0-9a-f]{32}\+(\d+)(\+\S+)*', tok)
+                if block_locator:
+                    blocksize = long(block_locator.group(1))
                     blocks.append(Range(tok, streamoffset, blocksize))
                     streamoffset += blocksize
                 else:
                     state = SEGMENTS
 
             if state == SEGMENTS:
-                s = re.search(r'^(\d+):(\d+):(\S+)', tok)
-                if s:
-                    pos = long(s.group(1))
-                    size = long(s.group(2))
-                    name = s.group(3).replace('\\040', ' ')
+                file_segment = re.search(r'^(\d+):(\d+):(\S+)', tok)
+                if file_segment:
+                    pos = long(file_segment.group(1))
+                    size = long(file_segment.group(2))
+                    name = file_segment.group(3).replace('\\040', ' ')
                     filepath = os.path.join(stream_name, name)
-                    f = self.find_or_create(filepath, FILE)
-                    if isinstance(f, ArvadosFile):
-                        f.add_segment(blocks, pos, size)
+                    afile = self.find_or_create(filepath, FILE)
+                    if isinstance(afile, ArvadosFile):
+                        afile.add_segment(blocks, pos, size)
                     else:
                         raise errors.SyntaxError("File %s conflicts with stream of the same name.", filepath)
                 else:
@@ -1375,7 +1368,6 @@ class Collection(SynchronizedCollectionBase):
                 state = STREAM_NAME
 
         self.set_unmodified()
-        self._sync = save_sync
 
 
 class Subcollection(SynchronizedCollectionBase):
@@ -1394,8 +1386,8 @@ class Subcollection(SynchronizedCollectionBase):
     def root_collection(self):
         return self.parent.root_collection()
 
-    def sync_mode(self):
-        return self.root_collection().sync_mode()
+    def writable(self):
+        return self.root_collection().writable()
 
     def _my_api(self):
         return self.root_collection()._my_api()
@@ -1418,31 +1410,33 @@ class Subcollection(SynchronizedCollectionBase):
     @synchronized
     def clone(self, new_parent):
         c = Subcollection(new_parent)
-        self._cloneinto(c)
+        c._clonefrom(self)
         return c
 
 
 class CollectionReader(Collection):
-    """A read-only collection object from an api collection record locator,
-    a portable data hash of a manifest, or raw manifest text.
+    """A read-only collection object.
 
-    See `Collection` constructor for detailed options.
+    Initialize from an api collection record locator, a portable data hash of a
+    manifest, or raw manifest text.  See `Collection` constructor for detailed
+    options.
 
     """
-    def __init__(self, *args, **kwargs):
-        if not args and not kwargs.get("manifest_locator_or_text"):
-            raise errors.ArgumentError("Must provide manifest locator or text to initialize ReadOnlyCollection")
-
-        super(CollectionReader, self).__init__(*args, **kwargs)
+    def __init__(self, manifest_locator_or_text, *args, **kwargs):
+        self._in_init = True
+        super(CollectionReader, self).__init__(manifest_locator_or_text, *args, **kwargs)
+        self._in_init = False
 
         # Forego any locking since it should never change once initialized.
         self.lock = NoopLock()
-        self._sync = SYNC_READONLY
 
         # Backwards compatibility with old CollectionReader
         # all_streams() and all_files()
         self._streams = None
 
+    def writable(self):
+        return self._in_init
+
     def _populate_streams(orig_func):
         @functools.wraps(orig_func)
         def populate_streams_wrapper(self, *args, **kwargs):
diff --git a/sdk/python/arvados/errors.py b/sdk/python/arvados/errors.py
index 16f4096..7a5c0ea 100644
--- a/sdk/python/arvados/errors.py
+++ b/sdk/python/arvados/errors.py
@@ -19,14 +19,17 @@ class KeepRequestError(Exception):
     def __init__(self, message='', service_errors=()):
         """KeepRequestError(message='', service_errors=())
 
-        Arguments:
-        * message: A human-readable message describing what Keep operation
+        :message:
+          A human-readable message describing what Keep operation
           failed.
-        * service_errors: An iterable that yields 2-tuples of Keep
+
+        :service_errors:
+          An iterable that yields 2-tuples of Keep
           service URLs to the error encountered when talking to
           it--either an exception, or an HTTP response object.  These
           will be packed into an OrderedDict, available through the
           service_errors() method.
+
         """
         self._service_errors = OrderedDict(service_errors)
         if self._service_errors:
diff --git a/sdk/python/arvados/keep.py b/sdk/python/arvados/keep.py
index f59ec71..ab68352 100644
--- a/sdk/python/arvados/keep.py
+++ b/sdk/python/arvados/keep.py
@@ -418,35 +418,52 @@ class KeepClient(object):
         """Initialize a new KeepClient.
 
         Arguments:
-        * api_client: The API client to use to find Keep services.  If not
+        :api_client:
+          The API client to use to find Keep services.  If not
           provided, KeepClient will build one from available Arvados
           configuration.
-        * proxy: If specified, this KeepClient will send requests to this
-          Keep proxy.  Otherwise, KeepClient will fall back to the setting
-          of the ARVADOS_KEEP_PROXY configuration setting.  If you want to
-          ensure KeepClient does not use a proxy, pass in an empty string.
-        * timeout: The timeout (in seconds) for HTTP requests to Keep
+
+        :proxy:
+          If specified, this KeepClient will send requests to this Keep
+          proxy.  Otherwise, KeepClient will fall back to the setting of the
+          ARVADOS_KEEP_PROXY configuration setting.  If you want to ensure
+          KeepClient does not use a proxy, pass in an empty string.
+
+        :timeout:
+          The timeout (in seconds) for HTTP requests to Keep
           non-proxy servers.  A tuple of two floats is interpreted as
           (connection_timeout, read_timeout): see
           http://docs.python-requests.org/en/latest/user/advanced/#timeouts.
           Default: (2, 300).
-        * proxy_timeout: The timeout (in seconds) for HTTP requests to
+
+        :proxy_timeout:
+          The timeout (in seconds) for HTTP requests to
           Keep proxies. A tuple of two floats is interpreted as
           (connection_timeout, read_timeout). Default: (20, 300).
-        * api_token: If you're not using an API client, but only talking
+
+        :api_token:
+          If you're not using an API client, but only talking
           directly to a Keep proxy, this parameter specifies an API token
           to authenticate Keep requests.  It is an error to specify both
           api_client and api_token.  If you specify neither, KeepClient
           will use one available from the Arvados configuration.
-        * local_store: If specified, this KeepClient will bypass Keep
+
+        :local_store:
+          If specified, this KeepClient will bypass Keep
           services, and save data to the named directory.  If unspecified,
           KeepClient will fall back to the setting of the $KEEP_LOCAL_STORE
           environment variable.  If you want to ensure KeepClient does not
           use local storage, pass in an empty string.  This is primarily
           intended to mock a server for testing.
-        * num_retries: The default number of times to retry failed requests.
+
+        :num_retries:
+          The default number of times to retry failed requests.
           This will be used as the default num_retries value when get() and
           put() are called.  Default 0.
+
+        :session:
+          The requests.Session object to use for get() and put() requests.
+          Will create one if not specified.
         """
         self.lock = threading.Lock()
         if proxy is None:
diff --git a/sdk/python/arvados/safeapi.py b/sdk/python/arvados/safeapi.py
index baada91..5c5c872 100644
--- a/sdk/python/arvados/safeapi.py
+++ b/sdk/python/arvados/safeapi.py
@@ -1,5 +1,5 @@
 import threading
-import apisetup
+import api
 import keep
 import config
 import copy
@@ -21,7 +21,7 @@ class ThreadSafeApiCache(object):
 
     def localapi(self):
         if 'api' not in self.local.__dict__:
-            self.local.api = apisetup.api_from_config('v1', apiconfig=self.apiconfig)
+            self.local.api = api.api_from_config('v1', apiconfig=self.apiconfig)
         return self.local.api
 
     def __getattr__(self, name):
diff --git a/sdk/python/arvados/stream.py b/sdk/python/arvados/stream.py
index 85c0320..31638a0 100644
--- a/sdk/python/arvados/stream.py
+++ b/sdk/python/arvados/stream.py
@@ -78,10 +78,6 @@ class StreamReader(object):
 
     @retry_method
     def readfrom(self, start, size, num_retries=None):
-        return self._readfrom(start, size, num_retries=num_retries)
-
-    @retry_method
-    def _readfrom(self, start, size, num_retries=None):
         """Read up to 'size' bytes from the stream, starting at 'start'"""
         if size == 0:
             return ''
diff --git a/sdk/python/tests/arvados_testutil.py b/sdk/python/tests/arvados_testutil.py
index 49ed79c..644dfff 100644
--- a/sdk/python/tests/arvados_testutil.py
+++ b/sdk/python/tests/arvados_testutil.py
@@ -94,9 +94,6 @@ class MockStreamReader(object):
         return self._name
 
     def readfrom(self, start, size, num_retries=None):
-        self._readfrom(start, size, num_retries=num_retries)
-
-    def _readfrom(self, start, size, num_retries=None):
         return self._data[start:start + size]
 
 class ApiClientMock(object):
diff --git a/sdk/python/tests/test_arv_put.py b/sdk/python/tests/test_arv_put.py
index eaefd79..d078268 100644
--- a/sdk/python/tests/test_arv_put.py
+++ b/sdk/python/tests/test_arv_put.py
@@ -43,7 +43,7 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
 
     def test_cache_names_stable(self):
         for argset in self.CACHE_ARGSET:
-            self.assertEquals(self.cache_path_from_arglist(argset),
+            self.assertEqual(self.cache_path_from_arglist(argset),
                               self.cache_path_from_arglist(argset),
                               "cache name changed for {}".format(argset))
 
@@ -65,10 +65,10 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
                              "path too exotic: {}".format(path))
 
     def test_cache_names_ignore_argument_order(self):
-        self.assertEquals(
+        self.assertEqual(
             self.cache_path_from_arglist(['a', 'b', 'c']),
             self.cache_path_from_arglist(['c', 'a', 'b']))
-        self.assertEquals(
+        self.assertEqual(
             self.cache_path_from_arglist(['-', '--filename', 'stdin']),
             self.cache_path_from_arglist(['--filename', 'stdin', '-']))
 
@@ -84,32 +84,32 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
         args = arv_put.parse_arguments(['/tmp'])
         args.filename = 'tmp'
         path2 = arv_put.ResumeCache.make_path(args)
-        self.assertEquals(path1, path2,
+        self.assertEqual(path1, path2,
                          "cache path considered --filename for directory")
-        self.assertEquals(
+        self.assertEqual(
             self.cache_path_from_arglist(['-']),
             self.cache_path_from_arglist(['-', '--max-manifest-depth', '1']),
             "cache path considered --max-manifest-depth for file")
 
     def test_cache_names_treat_negative_manifest_depths_identically(self):
         base_args = ['/tmp', '--max-manifest-depth']
-        self.assertEquals(
+        self.assertEqual(
             self.cache_path_from_arglist(base_args + ['-1']),
             self.cache_path_from_arglist(base_args + ['-2']))
 
     def test_cache_names_treat_stdin_consistently(self):
-        self.assertEquals(
+        self.assertEqual(
             self.cache_path_from_arglist(['-', '--filename', 'test']),
             self.cache_path_from_arglist(['/dev/stdin', '--filename', 'test']))
 
     def test_cache_names_identical_for_synonymous_names(self):
-        self.assertEquals(
+        self.assertEqual(
             self.cache_path_from_arglist(['.']),
             self.cache_path_from_arglist([os.path.realpath('.')]))
         testdir = self.make_tmpdir()
         looplink = os.path.join(testdir, 'loop')
         os.symlink(testdir, looplink)
-        self.assertEquals(
+        self.assertEqual(
             self.cache_path_from_arglist([testdir]),
             self.cache_path_from_arglist([looplink]))
 
@@ -131,7 +131,7 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
         with tempfile.NamedTemporaryFile() as cachefile:
             self.last_cache = arv_put.ResumeCache(cachefile.name)
         self.last_cache.save(thing)
-        self.assertEquals(thing, self.last_cache.load())
+        self.assertEqual(thing, self.last_cache.load())
 
     def test_empty_cache(self):
         with tempfile.NamedTemporaryFile() as cachefile:
@@ -145,7 +145,7 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
         cache.save(thing)
         cache.close()
         self.last_cache = arv_put.ResumeCache(path)
-        self.assertEquals(thing, self.last_cache.load())
+        self.assertEqual(thing, self.last_cache.load())
 
     def test_multiple_cache_writes(self):
         thing = ['short', 'list']
@@ -155,7 +155,7 @@ class ArvadosPutResumeCacheTest(ArvadosBaseTestCase):
         # sure the cache file gets truncated.
         self.last_cache.save(['long', 'long', 'list'])
         self.last_cache.save(thing)
-        self.assertEquals(thing, self.last_cache.load())
+        self.assertEqual(thing, self.last_cache.load())
 
     def test_cache_is_locked(self):
         with tempfile.NamedTemporaryFile() as cachefile:
@@ -216,12 +216,12 @@ class ArvadosPutCollectionWriterTest(run_test_server.TestCaseWithServers,
         cwriter.write_file('/dev/null')
         cwriter.cache_state()
         self.assertTrue(self.cache.load())
-        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+        self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
 
     def test_writer_works_without_cache(self):
         cwriter = arv_put.ArvPutCollectionWriter()
         cwriter.write_file('/dev/null')
-        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+        self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
 
     def test_writer_resumes_from_cache(self):
         cwriter = arv_put.ArvPutCollectionWriter(self.cache)
@@ -230,7 +230,7 @@ class ArvadosPutCollectionWriterTest(run_test_server.TestCaseWithServers,
             cwriter.cache_state()
             new_writer = arv_put.ArvPutCollectionWriter.from_cache(
                 self.cache)
-            self.assertEquals(
+            self.assertEqual(
                 ". 098f6bcd4621d373cade4e832627b4f6+4 0:4:test\n",
                 new_writer.manifest_text())
 
@@ -240,12 +240,12 @@ class ArvadosPutCollectionWriterTest(run_test_server.TestCaseWithServers,
             cwriter.write_file(testfile.name, 'test')
         new_writer = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
         new_writer.write_file('/dev/null')
-        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", new_writer.manifest_text())
+        self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", new_writer.manifest_text())
 
     def test_new_writer_from_empty_cache(self):
         cwriter = arv_put.ArvPutCollectionWriter.from_cache(self.cache)
         cwriter.write_file('/dev/null')
-        self.assertEquals(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
+        self.assertEqual(". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\n", cwriter.manifest_text())
 
     def test_writer_resumable_after_arbitrary_bytes(self):
         cwriter = arv_put.ArvPutCollectionWriter(self.cache)
@@ -255,7 +255,7 @@ class ArvadosPutCollectionWriterTest(run_test_server.TestCaseWithServers,
             cwriter.cache_state()
             new_writer = arv_put.ArvPutCollectionWriter.from_cache(
                 self.cache)
-        self.assertEquals(cwriter.manifest_text(), new_writer.manifest_text())
+        self.assertEqual(cwriter.manifest_text(), new_writer.manifest_text())
 
     def make_progress_tester(self):
         progression = []
@@ -288,16 +288,16 @@ class ArvadosExpectedBytesTest(ArvadosBaseTestCase):
     TEST_SIZE = os.path.getsize(__file__)
 
     def test_expected_bytes_for_file(self):
-        self.assertEquals(self.TEST_SIZE,
+        self.assertEqual(self.TEST_SIZE,
                           arv_put.expected_bytes_for([__file__]))
 
     def test_expected_bytes_for_tree(self):
         tree = self.make_tmpdir()
         shutil.copyfile(__file__, os.path.join(tree, 'one'))
         shutil.copyfile(__file__, os.path.join(tree, 'two'))
-        self.assertEquals(self.TEST_SIZE * 2,
+        self.assertEqual(self.TEST_SIZE * 2,
                           arv_put.expected_bytes_for([tree]))
-        self.assertEquals(self.TEST_SIZE * 3,
+        self.assertEqual(self.TEST_SIZE * 3,
                           arv_put.expected_bytes_for([tree, __file__]))
 
     def test_expected_bytes_for_device(self):
diff --git a/sdk/python/tests/test_arvfile.py b/sdk/python/tests/test_arvfile.py
index 9c67fbd..e63cf85 100644
--- a/sdk/python/tests/test_arvfile.py
+++ b/sdk/python/tests/test_arvfile.py
@@ -12,7 +12,7 @@ import arvados
 from arvados._ranges import Range
 from arvados.keep import KeepLocator
 from arvados.collection import Collection, CollectionReader
-from arvados.arvfile import ArvadosFile, ArvadosFileReader, SYNC_READONLY, SYNC_EXPLICIT
+from arvados.arvfile import ArvadosFile, ArvadosFileReader
 
 import arvados_testutil as tutil
 from test_stream import StreamFileReaderTestCase, StreamRetryTestMixin
@@ -563,5 +563,5 @@ class BlockManagerTest(unittest.TestCase):
         bufferblock.append("foo")
         with self.assertRaises(arvados.errors.KeepWriteError) as err:
             blockmanager.commit_all()
-        self.assertEquals(str(err.exception), "Error writing some blocks: acbd18db4cc2f85cedef654fccc4a4d8+3 raised KeepWriteError (fail)")
+        self.assertEqual(str(err.exception), "Error writing some blocks: acbd18db4cc2f85cedef654fccc4a4d8+3 raised KeepWriteError (fail)")
         self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.PENDING)
diff --git a/sdk/python/tests/test_collections.py b/sdk/python/tests/test_collections.py
index bc94f0d..cba14ad 100644
--- a/sdk/python/tests/test_collections.py
+++ b/sdk/python/tests/test_collections.py
@@ -15,7 +15,6 @@ import unittest
 import run_test_server
 from arvados._ranges import Range, LocatorAndRange
 from arvados.collection import Collection, CollectionReader
-from arvados.arvfile import SYNC_EXPLICIT
 import arvados_testutil as tutil
 
 class TestResumableWriter(arvados.ResumableCollectionWriter):
@@ -434,7 +433,7 @@ class ArvadosCollectionsTest(run_test_server.TestCaseWithServers,
         with self.make_test_file() as testfile:
             cwriter.write_file(testfile.name, 'test')
             resumed = TestResumableWriter.from_state(cwriter.current_state())
-        self.assertEquals(cwriter.manifest_text(), resumed.manifest_text(),
+        self.assertEqual(cwriter.manifest_text(), resumed.manifest_text(),
                           "resumed CollectionWriter had different manifest")
 
     def test_resume_fails_when_missing_dependency(self):
@@ -640,7 +639,7 @@ class CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):
     def check_open_file(self, coll_file, stream_name, file_name, file_size):
         self.assertFalse(coll_file.closed, "returned file is not open")
         self.assertEqual(stream_name, coll_file.stream_name())
-        self.assertEqual(file_name, coll_file.name())
+        self.assertEqual(file_name, coll_file.name)
         self.assertEqual(file_size, coll_file.size())
 
     def test_open_collection_file_one_argument(self):
@@ -821,13 +820,13 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
         m1 = """. 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt
 ./md5sum.txt 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt
 """
-        with self.assertRaises(IOError):
-            self.assertEqual(m1, CollectionReader(m1).manifest_text(normalize=False))
+        with self.assertRaises(arvados.errors.ArgumentError):
+            self.assertEqual(m1, CollectionReader(m1))
 
     def test_init_manifest_with_error(self):
         m1 = """. 0:43:md5sum.txt"""
         with self.assertRaises(arvados.errors.ArgumentError):
-            self.assertEqual(m1, CollectionReader(m1).manifest_text(normalize=False))
+            self.assertEqual(m1, CollectionReader(m1))
 
     def test_remove(self):
         c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\n')
@@ -991,8 +990,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
 
         # c1 changed, so c2 mod will go to a conflict file
         c1.apply(d)
-        self.assertTrue(re.match(r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1.txt 5:10:count1.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$",
-                                 c1.manifest_text()))
+        self.assertRegexpMatches(c1.manifest_text(), r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1\.txt 5:10:count1\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
 
     def test_conflict_add(self):
         c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
@@ -1005,8 +1003,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
 
         # c1 added count1.txt, so c2 add will go to a conflict file
         c1.apply(d)
-        self.assertTrue(re.match(r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1.txt 5:10:count1.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$",
-                                 c1.manifest_text()))
+        self.assertRegexpMatches(c1.manifest_text(), r"\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\+5 5348b82a029fd9e971a811ce1f71360b\+43 0:5:count1\.txt 5:10:count1\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
 
     def test_conflict_del(self):
         c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')
@@ -1017,8 +1014,7 @@ class NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):
 
         # c1 deleted, so c2 mod will go to a conflict file
         c1.apply(d)
-        self.assertTrue(re.match(r"\. 5348b82a029fd9e971a811ce1f71360b\+43 0:10:count1.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$",
-                                 c1.manifest_text()))
+        self.assertRegexpMatches(c1.manifest_text(), r"\. 5348b82a029fd9e971a811ce1f71360b\+43 0:10:count1\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
 
     def test_notify(self):
         c1 = Collection()
@@ -1044,28 +1040,26 @@ class CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):
 
         c = Collection()
         c.save_new("CollectionCreateUpdateTest", ensure_unique_name=True)
-        self.assertEquals(c.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
-        self.assertEquals(c.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0" )
+        self.assertEqual(c.portable_data_hash(), "d41d8cd98f00b204e9800998ecf8427e+0")
+        self.assertEqual(c.api_response()["portable_data_hash"], "d41d8cd98f00b204e9800998ecf8427e+0" )
 
         with c.open("count.txt", "w") as f:
             f.write("0123456789")
 
-        self.assertEquals(c.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
+        self.assertEqual(c.manifest_text(), ". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\n")
 
         return c
 
     def test_create_and_save(self):
         c = self.create_count_txt()
         c.save()
-        self.assertTrue(re.match(r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count.txt$",
-                                 c.manifest_text()))
+        self.assertRegexpMatches(c.manifest_text(), r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
 
 
     def test_create_and_save_new(self):
         c = self.create_count_txt()
         c.save_new()
-        self.assertTrue(re.match(r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count.txt$",
-                                 c.manifest_text()))
+        self.assertRegexpMatches(c.manifest_text(), r"^\. 781e5e245d69b566979b86e28d23f2c7\+10\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\.txt$",)
 
     def test_create_diff_apply(self):
         c1 = self.create_count_txt()
@@ -1124,7 +1118,7 @@ class CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):
         c2.save()
 
         c1.update()
-        self.assertTrue(re.match(r"\. e65075d550f9b5bf9992fa1d71a131be\+3 7ac66c0f148de9519b8bd264312c4d64\+7\+A[a-f0-9]{40}@[a-f0-9]{8} 0:3:count.txt 3:7:count.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$", c1.manifest_text()))
+        self.assertRegexpMatches(c1.manifest_text(), r"\. e65075d550f9b5bf9992fa1d71a131be\+3 7ac66c0f148de9519b8bd264312c4d64\+7\+A[a-f0-9]{40}@[a-f0-9]{8} 0:3:count\.txt 3:7:count\.txt~conflict-\d\d\d\d-\d\d-\d\d-\d\d:\d\d:\d\d~$")
 
 
 if __name__ == '__main__':
diff --git a/sdk/python/tests/test_keep_locator.py b/sdk/python/tests/test_keep_locator.py
index a7e9cb1..273992a 100644
--- a/sdk/python/tests/test_keep_locator.py
+++ b/sdk/python/tests/test_keep_locator.py
@@ -36,7 +36,7 @@ class ArvadosKeepLocatorTest(unittest.TestCase):
                           (self.sizes(), self.perm_hints())]:
             for loc_data in itertools.izip(self.checksums(), *hint_gens):
                 locator = '+'.join(loc_data)
-                self.assertEquals(locator, str(KeepLocator(locator)))
+                self.assertEqual(locator, str(KeepLocator(locator)))
 
     def test_nonchecksum_rejected(self):
         for badstr in ['', 'badbadbad', '8f9e68d957b504a29ba76c526c3145dj',
@@ -48,7 +48,7 @@ class ArvadosKeepLocatorTest(unittest.TestCase):
         base = next(self.base_locators(1))
         for weirdhint in ['Zfoo', 'Ybar234', 'Xa@b_c-372', 'W99']:
             locator = '+'.join([base, weirdhint])
-            self.assertEquals(locator, str(KeepLocator(locator)))
+            self.assertEqual(locator, str(KeepLocator(locator)))
 
     def test_bad_hints_rejected(self):
         base = next(self.base_locators(1))
@@ -60,7 +60,7 @@ class ArvadosKeepLocatorTest(unittest.TestCase):
         base = next(self.base_locators(1))
         for loc_hints in itertools.permutations(['Kab1cd', 'Kef2gh', 'Kij3kl']):
             locator = '+'.join((base,) + loc_hints)
-            self.assertEquals(locator, str(KeepLocator(locator)))
+            self.assertEqual(locator, str(KeepLocator(locator)))
 
     def test_expiry_passed(self):
         base = next(self.base_locators(1))
diff --git a/sdk/python/tests/test_sdk.py b/sdk/python/tests/test_sdk.py
index d8b37d8..9b9f3fc 100644
--- a/sdk/python/tests/test_sdk.py
+++ b/sdk/python/tests/test_sdk.py
@@ -7,7 +7,7 @@ import arvados.collection
 
 class TestSDK(unittest.TestCase):
 
-    @mock.patch('arvados.apisetup.api_from_config')
+    @mock.patch('arvados.api')
     @mock.patch('arvados.current_task')
     @mock.patch('arvados.current_job')
     def test_one_task_per_input_file_normalize(self, mock_job, mock_task, mock_api):

-----------------------------------------------------------------------


hooks/post-receive
-- 