[ARVADOS] created: 12c9306193a1ec1a427e0ffda3f317a5f9ae091a
git at public.curoverse.com
Tue Jan 21 14:20:23 EST 2014
at 12c9306193a1ec1a427e0ffda3f317a5f9ae091a (commit)
commit 12c9306193a1ec1a427e0ffda3f317a5f9ae091a
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jan 21 14:15:08 2014 -0500
Fixed path to glyphicons
diff --git a/doc/css/bootstrap.css b/doc/css/bootstrap.css
index 6b2ba30..03f41e1 100644
--- a/doc/css/bootstrap.css
+++ b/doc/css/bootstrap.css
@@ -2429,7 +2429,7 @@ input[type="submit"].btn.btn-mini {
*margin-right: .3em;
line-height: 14px;
vertical-align: text-top;
- background-image: url("../img/glyphicons-halflings.png");
+ background-image: url("../images/glyphicons-halflings.png");
background-position: 14px 14px;
background-repeat: no-repeat;
margin-top: 1px;
@@ -2452,7 +2452,7 @@ input[type="submit"].btn.btn-mini {
.dropdown-submenu:focus > a > [class^="icon-"],
.dropdown-submenu:hover > a > [class*=" icon-"],
.dropdown-submenu:focus > a > [class*=" icon-"] {
- background-image: url("../img/glyphicons-halflings-white.png");
+ background-image: url("../images/glyphicons-halflings-white.png");
}
.icon-glass {
background-position: 0 0;
commit f34faaa3ee1e9fc6780b6bcdd19ee1d21d410be0
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jan 21 14:06:35 2014 -0500
Moved Python SDK and crunch utility scripts pages to SDK Reference section.
diff --git a/doc/_config.yml b/doc/_config.yml
index a4fed53..274ae9b 100644
--- a/doc/_config.yml
+++ b/doc/_config.yml
@@ -10,3 +10,5 @@ navbar:
- Concepts
- API Methods
- Schema
+ sdk:
+ - Python
\ No newline at end of file
diff --git a/doc/_includes/navbar_left.html b/doc/_includes/navbar_left.html
index d593ff3..f74606e 100644
--- a/doc/_includes/navbar_left.html
+++ b/doc/_includes/navbar_left.html
@@ -1,7 +1,7 @@
<div class="span3">
<div class="affix-top">
<div class="well sidebar-nav">
- {% if page.navsection == 'userguide' or page.navsection == 'api' %}
+ {% if page.navsection == 'userguide' or page.navsection == 'api' or page.navsection == 'sdk' %}
<ol class="nav nav-list">
{% for menu_item in site.navbar[page.navsection] %}
<li>{{ menu_item }}
diff --git a/doc/_includes/navbar_top.html b/doc/_includes/navbar_top.html
index bdcc4ce..a5f629b 100644
--- a/doc/_includes/navbar_top.html
+++ b/doc/_includes/navbar_top.html
@@ -10,13 +10,7 @@
<div class="nav-collapse collapse">
<ul class="nav">
<li {% if page.navsection == 'userguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/user/">User Guide</a></li>
-
- <li {% if page.navsection == 'sdk' %} class="active dropdown" {% else %} class="dropdown" {% endif %}>
- <a href="#" class="dropdown-toggle" data-toggle="dropdown">SDK Reference <b class="caret"></b></a>
- <ul class="dropdown-menu">
- <li><a href="{{ site.baseurl }}/sdk/python.html">Python SDK</a></li>
- </ul>
- </li>
+ <li {% if page.navsection == 'sdk' %} class="active" {% endif %}><a href="{{ site.baseurl }}/sdk/">SDK Reference</a></li>
<li {% if page.navsection == 'api' %} class="active" {% endif %}><a href="{{ site.baseurl }}/api/">API Reference</a></li>
<li {% if page.navsection == 'adminguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/admin/">Admin Guide</a></li>
<li {% if page.navsection == 'installguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/install/">Install Guide</a></li>
diff --git a/doc/_layouts/default.html b/doc/_layouts/default.html
index defa41d..4e2f08a 100644
--- a/doc/_layouts/default.html
+++ b/doc/_layouts/default.html
@@ -15,7 +15,8 @@
}
body {
padding-top: 41px;
- height: calc(100% - 46px);
+ height: 90%; /* If calc() is not supported */
+ height: calc(100% - 46px); /* Sets the body full height minus the padding for the menu bar */
}
div.frontpagehero {
background: #fff;
diff --git a/doc/images/glyphicons-halflings-white.png b/doc/images/glyphicons-halflings-white.png
new file mode 100644
index 0000000..3bf6484
Binary files /dev/null and b/doc/images/glyphicons-halflings-white.png differ
diff --git a/doc/images/glyphicons-halflings.png b/doc/images/glyphicons-halflings.png
new file mode 100644
index 0000000..a996999
Binary files /dev/null and b/doc/images/glyphicons-halflings.png differ
diff --git a/doc/index.html b/doc/index.html
index 3ff2956..af280c0 100644
--- a/doc/index.html
+++ b/doc/index.html
@@ -24,13 +24,16 @@ title: Arvados | Documentation
<div style="width: 50px; display: table-cell; border-left:1px solid #bbb;">
</div>
- <div style="margin-left: 0px; width: 450px; display: table-cell;">
+ <div style="margin-left: 0px; width: auto; display: table-cell;">
<div>
<p>
<a href="{{ site.baseurl }}/user/">User Guide</a> — How to manage data and do analysis with Arvados.
</p>
<p>
- <a href="{{ site.baseurl }}/api/">API Reference</a> — Details about the Arvados APIs.
+ <a href="{{ site.baseurl }}/sdk/">SDK Reference</a> — Details about accessing Arvados from various programming languages.
+ </p>
+ <p>
+ <a href="{{ site.baseurl }}/api/">API Reference</a> — Details about the Arvados REST API.
</p>
<p>
<a href="{{ site.baseurl }}/admin/">Admin Guide</a> — How to administer an Arvados system.
diff --git a/doc/mkpydoc.sh b/doc/mkpydoc.sh
index 0fa7aae..27be8c7 100755
--- a/doc/mkpydoc.sh
+++ b/doc/mkpydoc.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-mkdir -p _site/sdk/python
-cd _site/sdk/python
+mkdir -p _site/sdk/python/arvados
+cd _site/sdk/python/arvados
epydoc --html -o . "arvados"
diff --git a/doc/sdk/index.textile b/doc/sdk/index.textile
new file mode 100644
index 0000000..4224b59
--- /dev/null
+++ b/doc/sdk/index.textile
@@ -0,0 +1,12 @@
+---
+layout: default
+navsection: sdk
+title: "SDK Reference"
+navorder: 0
+---
+
+h1. Arvados SDK Reference
+
+This section documents how to access the Arvados API and Keep using various programming languages.
+
+* "Python SDK":python/sdk-python.html
diff --git a/doc/sdk/python.textile b/doc/sdk/python.textile
deleted file mode 100644
index a23012f..0000000
--- a/doc/sdk/python.textile
+++ /dev/null
@@ -1,9 +0,0 @@
----
-layout: default
-navsection: sdk
-title: Arvados Software Development Kit (SDK)
-navorder: 0
-no_nav_left: true
----
-
-notextile. <iframe src="python/" style="width:100%; height:100%; border:none" />
diff --git a/doc/user/reference/crunch-utility-libraries.textile b/doc/sdk/python/crunch-utility-libraries.textile
similarity index 91%
rename from doc/user/reference/crunch-utility-libraries.textile
rename to doc/sdk/python/crunch-utility-libraries.textile
index 524040f..95a3780 100644
--- a/doc/user/reference/crunch-utility-libraries.textile
+++ b/doc/sdk/python/crunch-utility-libraries.textile
@@ -1,20 +1,21 @@
---
layout: default
-navsection: userguide
-navmenu: Reference
+navsection: sdk
+navmenu: Python
title: "Crunch utility libraries"
-navorder: 31
+navorder: 20
---
h1. Crunch utility libraries
Several utility libraries are included with Arvados. They are intended to make it quicker and easier to write your own crunch scripts.
-h4. Python SDK extras
+* "Python SDK extras":#pythonsdk
+* "Toolkit wrappers":#toolkit_wrappers
-The Python SDK adds some convenience features that are particularly useful in crunch scripts, in addition to the standard set of API calls.
+h2(#pythonsdk). Python SDK extras
-<div class="offset1">
+The Python SDK adds some convenience features that are particularly useful in crunch scripts, in addition to the standard set of API calls.
In a crunch job, the environment variables @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ will be set up so the job has the privileges of the user who submitted the job.
@@ -25,7 +26,7 @@ my_user = arvados.api().users().current().execute()
my_uuid = my_user['uuid']
</pre>
-h4. Get the current job and task parameters
+h3. Get the current job and task parameters
@arvados.current_job()@ and @arvados.current_task()@ are convenient ways to retrieve the current Job and Task, using the @JOB_UUID@ and @TASK_UUID@ environment variables provided to each crunch task process.
@@ -36,7 +37,7 @@ this_job_input = this_job['script_parameters']['input']
this_task_input = this_task['parameters']['input']
</pre>
-h4(#one_task_per_input). Queue a task for each input file
+h3(#one_task_per_input). Queue a task for each input file
A common pattern for a crunch job is to run one task to scan the input, and one task per input file to do the work.
@@ -61,7 +62,7 @@ arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)
my_input = this_task['parameters']['input']
</pre>
-h4. Set the current task's output and success flag
+h3. Set the current task's output and success flag
Each task in a crunch job must make an API call to record its output and set its @success@ attribute to True. The object returned by @current_task()@ has a @set_output()@ method to make the process more succinct.
@@ -69,13 +70,10 @@ Each task in a crunch job must make an API call to record its output and set its
arvados.current_task().set_output(my_output_locator)
</pre>
-</div>
-
-h4. arvados_ipc.py
+h3. arvados_ipc.py
Manage child processes and FIFOs (pipes).
-<div class="offset1">
This module makes it easier to check the exit status of every child process you start, and close the unused end of each FIFO at the appropriate time.
@@ -102,18 +100,14 @@ if not waitpid_and_check_children(children):
The "crunch scripts" included with Arvados include some more examples of using the arvados_ipc module.
-</div>
-
-h3. Toolkit wrappers
+h2(#toolkit_wrappers). Toolkit wrappers
The following *arvados-∗.py* modules provide "extract, build, run" helpers to make it easy to incorporate common analysis tools in your crunch scripts.
-h4. arvados_bwa.py
+h3. arvados_bwa.py
Build and run the "bwa":http://bio-bwa.sourceforge.net/bwa.shtml program.
-<div class="offset1">
-
The module retrieves the bwa source code from Keep, using the job's @bwa_tbz@ parameter.
<pre>
@@ -135,14 +129,10 @@ On qr1hi.arvadosapi.com, the source distribution @bwa-0.7.5a.tar.bz2@ is availab
}
</pre>
-</div>
-
-h4. arvados_gatk2.py
+h3. arvados_gatk2.py
Extract and run the "Genome Analysis Toolkit":http://www.broadinstitute.org/gatk/ programs.
-<div class="offset1">
-
The module retrieves the binary distribution tarball from Keep, using the job's @gatk_tbz@ parameter.
<pre>
@@ -171,13 +161,10 @@ The GATK data bundle is available in the collection @d237a90bae3870b3b033aea1e99
}
</pre>
-</div>
-
-h4. arvados_samtools.py
+h3. arvados_samtools.py
Build and run the "samtools":http://samtools.sourceforge.net/samtools.shtml program.
-<div class="offset1">
The module retrieves the samtools source code from Keep, using the job's @samtools_tgz@ parameter.
@@ -200,13 +187,11 @@ On qr1hi.arvadosapi.com, the source distribution @samtools-0.1.19.tar.gz@ is ava
}
</pre>
-</div>
-h4. arvados_picard.py
+h3. arvados_picard.py
Build and run the "picard":http://picard.sourceforge.net/command-line-overview.shtml program.
-<div class="offset1">
The module retrieves the picard binary distribution from Keep, using the job's @picard_zip@ parameter.
@@ -237,5 +222,4 @@ On qr1hi.arvadosapi.com, the binary distribution @picard-tools-1.82.zip@ is avai
}
</pre>
-</div>
diff --git a/doc/sdk/python/python.textile b/doc/sdk/python/python.textile
new file mode 100644
index 0000000..4d6900b
--- /dev/null
+++ b/doc/sdk/python/python.textile
@@ -0,0 +1,10 @@
+---
+layout: default
+navsection: sdk
+navmenu: Python
+title: "PyDoc Reference"
+navorder: 30
+no_nav_left: true
+---
+
+notextile. <iframe src="arvados/" style="width:100%; height:100%; border:none" />
diff --git a/doc/user/reference/sdk-python.textile b/doc/sdk/python/sdk-python.textile
similarity index 95%
rename from doc/user/reference/sdk-python.textile
rename to doc/sdk/python/sdk-python.textile
index 288a9d5..81a61f0 100644
--- a/doc/user/reference/sdk-python.textile
+++ b/doc/sdk/python/sdk-python.textile
@@ -1,12 +1,12 @@
---
layout: default
-navsection: userguide
-navmenu: Reference
+navsection: sdk
+navmenu: Python
title: "Python SDK"
-navorder: 23
+navorder: 10
---
-h1. Reference: Python SDK
+h1. Python SDK
The Python SDK provides a generic set of wrappers so you can make API calls easily. It performs some validation before connecting to the API server: for example, it refuses to do an API call if a required parameter is missing.
@@ -26,7 +26,7 @@ $ <code class="userinput">sudo python setup.py install</code>
</pre>
</notextile>
-If the SDK is installed and your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":api-tokens.html for details), @import arvados@ should produce no errors:
+If the SDK is installed and your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see "api-tokens":{{site.basedoc}}/user/reference/api-tokens.html for details), @import arvados@ should produce no errors:
<notextile>
<pre>$ <code class="userinput">python</code>
diff --git a/doc/user/reference/sdk-cli.textile b/doc/user/reference/sdk-cli.textile
index 17d2a3c..058f1b3 100644
--- a/doc/user/reference/sdk-cli.textile
+++ b/doc/user/reference/sdk-cli.textile
@@ -2,11 +2,11 @@
layout: default
navsection: userguide
navmenu: Reference
-title: "Command line SDK"
+title: "Command line interface"
navorder: 22
---
-h1. Reference: Command line SDK
+h1. Reference: Command Line Interface
If you are logged in to an Arvados VM, the command line SDK should be installed. Try:
diff --git a/doc/user/tutorials/tutorial-parallel.textile b/doc/user/tutorials/tutorial-parallel.textile
index b76d1a5..fb70eda 100644
--- a/doc/user/tutorials/tutorial-parallel.textile
+++ b/doc/user/tutorials/tutorial-parallel.textile
@@ -78,4 +78,4 @@ $ <span class="userinput">arv keep get e2ccd204bca37c77c0ba59fc470cd0f7+162/md5s
h2. The one job per file pattern
-This example demonstrates how to schedule a new task per file. Because this is a common pattern, the Crunch Python API contains a convenience function to "queue a task for each input file":{{site.basedoc}}/user/reference/crunch-utility-libraries.html#one_task_per_input which reduces the amount of boilerplate code required to handle parallel jobs.
+This example demonstrates how to schedule a new task per file. Because this is a common pattern, the Crunch Python API contains a convenience function to "queue a task for each input file":{{site.basedoc}}/sdk/python/crunch-utility-libraries.html#one_task_per_input which reduces the amount of boilerplate code required to handle parallel jobs.
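Taken together, the crunch utility calls documented in this commit compose into a short script. A minimal sketch of the one-task-per-input pattern, using only calls shown in the pages above (the output data written here is illustrative):

import arvados

# On the first invocation (sequence 0), queue one new task per input
# file and end the current task; later invocations run as the
# per-file tasks.
arvados.job_setup.one_task_per_input_file(if_sequence=0, and_end_task=True)

this_task = arvados.current_task()
my_input = this_task['parameters']['input']   # Keep locator for one file

# ... do the real work on my_input ...
my_output_locator = arvados.Keep.put('result data')  # hypothetical output

# Record the output and mark the task successful.
arvados.current_task().set_output(my_output_locator)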
commit 2e4fc5b7ec286d2beee8f93262dd70e481d161e2
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jan 21 11:51:34 2014 -0500
Script to build python documentation (requires epydoc)
diff --git a/doc/mkpydoc.sh b/doc/mkpydoc.sh
new file mode 100755
index 0000000..0fa7aae
--- /dev/null
+++ b/doc/mkpydoc.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+mkdir -p _site/sdk/python
+cd _site/sdk/python
+epydoc --html -o . "arvados"
commit e73f2804198ccd6d44e38312f3d4385dbd9b13ae
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jan 21 11:45:40 2014 -0500
Added Python SDK section to documentation
diff --git a/doc/_includes/navbar_top.html b/doc/_includes/navbar_top.html
index a5f629b..bdcc4ce 100644
--- a/doc/_includes/navbar_top.html
+++ b/doc/_includes/navbar_top.html
@@ -10,7 +10,13 @@
<div class="nav-collapse collapse">
<ul class="nav">
<li {% if page.navsection == 'userguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/user/">User Guide</a></li>
- <li {% if page.navsection == 'sdk' %} class="active" {% endif %}><a href="{{ site.baseurl }}/sdk/">SDK Reference</a></li>
+
+ <li {% if page.navsection == 'sdk' %} class="active dropdown" {% else %} class="dropdown" {% endif %}>
+ <a href="#" class="dropdown-toggle" data-toggle="dropdown">SDK Reference <b class="caret"></b></a>
+ <ul class="dropdown-menu">
+ <li><a href="{{ site.baseurl }}/sdk/python.html">Python SDK</a></li>
+ </ul>
+ </li>
<li {% if page.navsection == 'api' %} class="active" {% endif %}><a href="{{ site.baseurl }}/api/">API Reference</a></li>
<li {% if page.navsection == 'adminguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/admin/">Admin Guide</a></li>
<li {% if page.navsection == 'installguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/install/">Install Guide</a></li>
diff --git a/doc/_layouts/default.html b/doc/_layouts/default.html
index a7b5610..defa41d 100644
--- a/doc/_layouts/default.html
+++ b/doc/_layouts/default.html
@@ -10,8 +10,12 @@
<link rel="shortcut icon" href="{{ site.baseurl }}/images/favicon.ico" type="image/x-icon">
<link href="{{ site.baseurl }}/css/bootstrap.css" rel="stylesheet">
<style>
+ html {
+ height:100%;
+ }
body {
padding-top: 41px;
+ height: calc(100% - 46px);
}
div.frontpagehero {
background: #fff;
@@ -73,7 +77,7 @@
<body class="nopad">
{% include navbar_top.html %}
- {% if page.navsection == 'top' %}
+ {% if page.navsection == 'top' or page.no_nav_left %}
{{ content }}
{% else %}
diff --git a/doc/sdk/index.textile b/doc/sdk/python.textile
similarity index 50%
rename from doc/sdk/index.textile
rename to doc/sdk/python.textile
index 614e100..a23012f 100644
--- a/doc/sdk/index.textile
+++ b/doc/sdk/python.textile
@@ -3,6 +3,7 @@ layout: default
navsection: sdk
title: Arvados Software Development Kit (SDK)
navorder: 0
+no_nav_left: true
---
-notextile. <iframe src="python/" style="width:100%; height:100%" />
+notextile. <iframe src="python/" style="width:100%; height:100%; border:none" />
commit dd20d6a93d7ec92e477b3d957da8f984cfbfdff9
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jan 21 10:15:48 2014 -0500
Renamed Keep/Stream/Collection submodules to keep/stream/collection (lower case)
diff --git a/doc/_includes/navbar_top.html b/doc/_includes/navbar_top.html
index fc2098d..a5f629b 100644
--- a/doc/_includes/navbar_top.html
+++ b/doc/_includes/navbar_top.html
@@ -10,6 +10,7 @@
<div class="nav-collapse collapse">
<ul class="nav">
<li {% if page.navsection == 'userguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/user/">User Guide</a></li>
+ <li {% if page.navsection == 'sdk' %} class="active" {% endif %}><a href="{{ site.baseurl }}/sdk/">SDK Reference</a></li>
<li {% if page.navsection == 'api' %} class="active" {% endif %}><a href="{{ site.baseurl }}/api/">API Reference</a></li>
<li {% if page.navsection == 'adminguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/admin/">Admin Guide</a></li>
<li {% if page.navsection == 'installguide' %} class="active" {% endif %}><a href="{{ site.baseurl }}/install/">Install Guide</a></li>
diff --git a/doc/sdk/index.textile b/doc/sdk/index.textile
new file mode 100644
index 0000000..614e100
--- /dev/null
+++ b/doc/sdk/index.textile
@@ -0,0 +1,8 @@
+---
+layout: default
+navsection: sdk
+title: Arvados Software Development Kit (SDK)
+navorder: 0
+---
+
+notextile. <iframe src="python/" style="width:100%; height:100%" />
diff --git a/sdk/python/arvados/__init__.py b/sdk/python/arvados/__init__.py
index b99389a..484d9ed 100644
--- a/sdk/python/arvados/__init__.py
+++ b/sdk/python/arvados/__init__.py
@@ -21,10 +21,13 @@ import threading
import apiclient
import apiclient.discovery
-from Stream import *
-from Collection import *
-from Keep import *
+from stream import *
+from collection import *
+from keep import *
+config = None
+EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
+services = {}
# Arvados configuration settings are taken from $HOME/.config/arvados.
# Environment variables override settings in the config file.
@@ -41,16 +44,6 @@ class ArvadosConfig(dict):
if var.startswith('ARVADOS_'):
self[var] = os.environ[var]
-
-config = ArvadosConfig(os.environ['HOME'] + '/.config/arvados')
-
-if 'ARVADOS_DEBUG' in config:
- logging.basicConfig(level=logging.DEBUG)
-
-EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'
-
-services = {}
-
class errors:
class SyntaxError(Exception):
pass
@@ -136,6 +129,12 @@ apiclient.discovery._cast = _cast_objects_too
def api(version=None):
global services, config
+
+ if not config:
+ config = ArvadosConfig(os.environ['HOME'] + '/.config/arvados')
+ if 'ARVADOS_DEBUG' in config:
+ logging.basicConfig(level=logging.DEBUG)
+
if not services.get(version):
apiVersion = version
if not version:
@@ -523,8 +522,3 @@ class util:
allfiles += [ent_base]
return allfiles
-
-
-# We really shouldn't do this but some clients still use
-# arvados.service.* directly instead of arvados.api().*
-service = api()
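With this change the SDK no longer reads the config file or contacts the API server at import time; both happen inside the first arvados.api() call. A minimal sketch of the resulting flow (the host and token values are illustrative):

import os
import arvados

# $HOME/.config/arvados is read, and ARVADOS_* environment variables
# applied, lazily on the first api() call rather than at import.
os.environ['ARVADOS_API_HOST'] = 'qr1hi.arvadosapi.com'  # example host
os.environ['ARVADOS_API_TOKEN'] = 'xxxxxxxx'             # placeholder

api = arvados.api()
me = api.users().current().execute()
print(me['uuid'])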
diff --git a/sdk/python/arvados/Collection.py b/sdk/python/arvados/collection.py
similarity index 99%
rename from sdk/python/arvados/Collection.py
rename to sdk/python/arvados/collection.py
index ffc1bbf..682bbf0 100644
--- a/sdk/python/arvados/Collection.py
+++ b/sdk/python/arvados/collection.py
@@ -27,10 +27,13 @@ class CollectionReader(object):
self._manifest_locator = manifest_locator_or_text
self._manifest_text = None
self._streams = None
+
def __enter__(self):
pass
+
def __exit__(self):
pass
+
def _populate(self):
if self._streams != None:
return
@@ -41,22 +44,26 @@ class CollectionReader(object):
if stream_line != '':
stream_tokens = stream_line.split()
self._streams += [stream_tokens]
+
def all_streams(self):
self._populate()
resp = []
for s in self._streams:
resp += [StreamReader(s)]
return resp
+
def all_files(self):
for s in self.all_streams():
for f in s.all_files():
yield f
+
def manifest_text(self):
self._populate()
return self._manifest_text
class CollectionWriter(object):
KEEP_BLOCK_SIZE = 2**26
+
def __init__(self):
self._data_buffer = []
self._data_buffer_len = 0
@@ -67,10 +74,13 @@ class CollectionWriter(object):
self._current_file_name = None
self._current_file_pos = 0
self._finished_streams = []
+
def __enter__(self):
pass
+
def __exit__(self):
self.finish()
+
def write_directory_tree(self,
path, stream_name='.', max_manifest_depth=-1):
self.start_new_stream(stream_name)
@@ -106,23 +116,28 @@ class CollectionWriter(object):
self._current_stream_length += len(newdata)
while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
self.flush_data()
+
def flush_data(self):
data_buffer = ''.join(self._data_buffer)
if data_buffer != '':
self._current_stream_locators += [Keep.put(data_buffer[0:self.KEEP_BLOCK_SIZE])]
self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
self._data_buffer_len = len(self._data_buffer[0])
+
def start_new_file(self, newfilename=None):
self.finish_current_file()
self.set_current_file_name(newfilename)
+
def set_current_file_name(self, newfilename):
if re.search(r'[\t\n]', newfilename):
raise errors.AssertionError(
"Manifest filenames cannot contain whitespace: %s" %
newfilename)
self._current_file_name = newfilename
+
def current_file_name(self):
return self._current_file_name
+
def finish_current_file(self):
if self._current_file_name == None:
if self._current_file_pos == self._current_stream_length:
@@ -137,16 +152,20 @@ class CollectionWriter(object):
self._current_stream_length - self._current_file_pos,
self._current_file_name]]
self._current_file_pos = self._current_stream_length
+
def start_new_stream(self, newstreamname='.'):
self.finish_current_stream()
self.set_current_stream_name(newstreamname)
+
def set_current_stream_name(self, newstreamname):
if re.search(r'[\t\n]', newstreamname):
raise errors.AssertionError(
"Manifest stream names cannot contain whitespace")
self._current_stream_name = '.' if newstreamname=='' else newstreamname
+
def current_stream_name(self):
return self._current_stream_name
+
def finish_current_stream(self):
self.finish_current_file()
self.flush_data()
@@ -168,8 +187,10 @@ class CollectionWriter(object):
self._current_stream_name = None
self._current_file_pos = 0
self._current_file_name = None
+
def finish(self):
return Keep.put(self.manifest_text())
+
def manifest_text(self):
self.finish_current_stream()
manifest = ''
@@ -183,6 +204,7 @@ class CollectionWriter(object):
manifest += " %d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040'))
manifest += "\n"
return manifest
+
def data_locators(self):
ret = []
for name, locators, files in self._finished_streams:
diff --git a/sdk/python/arvados/Keep.py b/sdk/python/arvados/keep.py
similarity index 99%
rename from sdk/python/arvados/Keep.py
rename to sdk/python/arvados/keep.py
index 48673ff..055da62 100644
--- a/sdk/python/arvados/Keep.py
+++ b/sdk/python/arvados/keep.py
@@ -52,11 +52,14 @@ class KeepClient(object):
self._done = 0
self._todo_lock = threading.Semaphore(todo)
self._done_lock = threading.Lock()
+
def __enter__(self):
self._todo_lock.acquire()
return self
+
def __exit__(self, type, value, traceback):
self._todo_lock.release()
+
def shall_i_proceed(self):
"""
Return true if the current thread should do stuff. Return
@@ -64,12 +67,14 @@ class KeepClient(object):
"""
with self._done_lock:
return (self._done < self._todo)
+
def increment_done(self):
"""
Report that the current thread was successful.
"""
with self._done_lock:
self._done += 1
+
def done(self):
"""
Return how many successes were reported.
@@ -86,6 +91,7 @@ class KeepClient(object):
def __init__(self, **kwargs):
super(KeepClient.KeepWriterThread, self).__init__()
self.args = kwargs
+
def run(self):
global config
with self.args['thread_limiter'] as limiter:
@@ -230,6 +236,7 @@ class KeepClient(object):
os.rename(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'),
os.path.join(os.environ['KEEP_LOCAL_STORE'], md5))
return locator
+
@staticmethod
def local_store_get(locator):
r = re.search('^([0-9a-f]{32,})', locator)
diff --git a/sdk/python/arvados/Stream.py b/sdk/python/arvados/stream.py
similarity index 99%
rename from sdk/python/arvados/Stream.py
rename to sdk/python/arvados/stream.py
index b62b359..5dd8a89 100644
--- a/sdk/python/arvados/Stream.py
+++ b/sdk/python/arvados/stream.py
@@ -25,37 +25,46 @@ class StreamFileReader(object):
self._size = size
self._name = name
self._filepos = 0
+
def name(self):
return self._name
+
def decompressed_name(self):
return re.sub('\.(bz2|gz)$', '', self._name)
+
def size(self):
return self._size
+
def stream_name(self):
return self._stream.name()
+
def read(self, size, **kwargs):
self._stream.seek(self._pos + self._filepos)
data = self._stream.read(min(size, self._size - self._filepos))
self._filepos += len(data)
return data
+
def readall(self, size=2**20, **kwargs):
while True:
data = self.read(size, **kwargs)
if data == '':
break
yield data
+
def bunzip2(self, size):
decompressor = bz2.BZ2Decompressor()
for chunk in self.readall(size):
data = decompressor.decompress(chunk)
if data and data != '':
yield data
+
def gunzip(self, size):
decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
for chunk in self.readall(size):
data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
if data and data != '':
yield data
+
def readall_decompressed(self, size=2**20):
self._stream.seek(self._pos + self._filepos)
if re.search('\.bz2$', self._name):
@@ -64,6 +73,7 @@ class StreamFileReader(object):
return self.gunzip(size)
else:
return self.readall(size)
+
def readlines(self, decompress=True):
if decompress:
datasource = self.readall_decompressed()
@@ -83,6 +93,7 @@ class StreamFileReader(object):
data = data[sol:]
if data != '':
yield data
+
def as_manifest(self):
if self.size() == 0:
return ("%s %s 0:0:%s\n"
@@ -115,6 +126,7 @@ class StreamReader(object):
def tokens(self):
return self._tokens
+
def tokens_for_range(self, range_start, range_size):
resp = [self._stream_name]
return_all_tokens = False
@@ -143,12 +155,15 @@ class StreamReader(object):
f[1] > 0):
resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
return resp
+
def name(self):
return self._stream_name
+
def all_files(self):
for f in self.files:
pos, size, name = f
yield StreamFileReader(self, pos, size, name)
+
def nextdatablock(self):
if self._current_datablock_index < 0:
self._current_datablock_pos = 0
@@ -157,10 +172,12 @@ class StreamReader(object):
self._current_datablock_pos += self.current_datablock_size()
self._current_datablock_index += 1
self._current_datablock_data = None
+
def current_datablock_data(self):
if self._current_datablock_data == None:
self._current_datablock_data = Keep.get(self.data_locators[self._current_datablock_index])
return self._current_datablock_data
+
def current_datablock_size(self):
if self._current_datablock_index < 0:
self.nextdatablock()
@@ -168,9 +185,11 @@ class StreamReader(object):
if sizehint:
return int(sizehint.group(0))
return len(self.current_datablock_data())
+
def seek(self, pos):
"""Set the position of the next read operation."""
self._pos = pos
+
def really_seek(self):
"""Find and load the appropriate data block, so the byte at
_pos is in memory.
@@ -187,6 +206,7 @@ class StreamReader(object):
while (self._pos > self._current_datablock_pos and
self._pos > self._current_datablock_pos + self.current_datablock_size()):
self.nextdatablock()
+
def read(self, size):
"""Read no more than size bytes -- but at least one byte,
unless _pos is already at the end of the stream.
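For client code, the visible effect of this rename is confined to direct submodule imports; the package-level names are unchanged because arvados/__init__.py re-exports them. A minimal sketch, assuming the SDK is installed:

import arvados
from arvados.keep import Keep, KeepClient        # formerly arvados.Keep
from arvados.collection import CollectionReader  # formerly arvados.Collection
from arvados.stream import StreamReader          # formerly arvados.Stream

# The wildcard re-exports keep package-level references working:
assert arvados.CollectionReader is CollectionReader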
commit 86ec3f61b905b2887cf7158ac97a4b817492ebad
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jan 21 09:06:00 2014 -0500
Moving Collection, Stream and Keep classes into their own files
diff --git a/sdk/python/arvados/Collection.py b/sdk/python/arvados/Collection.py
new file mode 100644
index 0000000..ffc1bbf
--- /dev/null
+++ b/sdk/python/arvados/Collection.py
@@ -0,0 +1,190 @@
+import gflags
+import httplib
+import httplib2
+import logging
+import os
+import pprint
+import sys
+import types
+import subprocess
+import json
+import UserDict
+import re
+import hashlib
+import string
+import bz2
+import zlib
+import fcntl
+import time
+import threading
+
+class CollectionReader(object):
+ def __init__(self, manifest_locator_or_text):
+ if re.search(r'^\S+( [a-f0-9]{32,}(\+\S+)*)+( \d+:\d+:\S+)+\n', manifest_locator_or_text):
+ self._manifest_text = manifest_locator_or_text
+ self._manifest_locator = None
+ else:
+ self._manifest_locator = manifest_locator_or_text
+ self._manifest_text = None
+ self._streams = None
+ def __enter__(self):
+ pass
+ def __exit__(self):
+ pass
+ def _populate(self):
+ if self._streams != None:
+ return
+ if not self._manifest_text:
+ self._manifest_text = Keep.get(self._manifest_locator)
+ self._streams = []
+ for stream_line in self._manifest_text.split("\n"):
+ if stream_line != '':
+ stream_tokens = stream_line.split()
+ self._streams += [stream_tokens]
+ def all_streams(self):
+ self._populate()
+ resp = []
+ for s in self._streams:
+ resp += [StreamReader(s)]
+ return resp
+ def all_files(self):
+ for s in self.all_streams():
+ for f in s.all_files():
+ yield f
+ def manifest_text(self):
+ self._populate()
+ return self._manifest_text
+
+class CollectionWriter(object):
+ KEEP_BLOCK_SIZE = 2**26
+ def __init__(self):
+ self._data_buffer = []
+ self._data_buffer_len = 0
+ self._current_stream_files = []
+ self._current_stream_length = 0
+ self._current_stream_locators = []
+ self._current_stream_name = '.'
+ self._current_file_name = None
+ self._current_file_pos = 0
+ self._finished_streams = []
+ def __enter__(self):
+ pass
+ def __exit__(self):
+ self.finish()
+ def write_directory_tree(self,
+ path, stream_name='.', max_manifest_depth=-1):
+ self.start_new_stream(stream_name)
+ todo = []
+ if max_manifest_depth == 0:
+ dirents = sorted(util.listdir_recursive(path))
+ else:
+ dirents = sorted(os.listdir(path))
+ for dirent in dirents:
+ target = os.path.join(path, dirent)
+ if os.path.isdir(target):
+ todo += [[target,
+ os.path.join(stream_name, dirent),
+ max_manifest_depth-1]]
+ else:
+ self.start_new_file(dirent)
+ with open(target, 'rb') as f:
+ while True:
+ buf = f.read(2**26)
+ if len(buf) == 0:
+ break
+ self.write(buf)
+ self.finish_current_stream()
+ map(lambda x: self.write_directory_tree(*x), todo)
+
+ def write(self, newdata):
+ if hasattr(newdata, '__iter__'):
+ for s in newdata:
+ self.write(s)
+ return
+ self._data_buffer += [newdata]
+ self._data_buffer_len += len(newdata)
+ self._current_stream_length += len(newdata)
+ while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
+ self.flush_data()
+ def flush_data(self):
+ data_buffer = ''.join(self._data_buffer)
+ if data_buffer != '':
+ self._current_stream_locators += [Keep.put(data_buffer[0:self.KEEP_BLOCK_SIZE])]
+ self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
+ self._data_buffer_len = len(self._data_buffer[0])
+ def start_new_file(self, newfilename=None):
+ self.finish_current_file()
+ self.set_current_file_name(newfilename)
+ def set_current_file_name(self, newfilename):
+ if re.search(r'[\t\n]', newfilename):
+ raise errors.AssertionError(
+ "Manifest filenames cannot contain whitespace: %s" %
+ newfilename)
+ self._current_file_name = newfilename
+ def current_file_name(self):
+ return self._current_file_name
+ def finish_current_file(self):
+ if self._current_file_name == None:
+ if self._current_file_pos == self._current_stream_length:
+ return
+ raise errors.AssertionError(
+ "Cannot finish an unnamed file " +
+ "(%d bytes at offset %d in '%s' stream)" %
+ (self._current_stream_length - self._current_file_pos,
+ self._current_file_pos,
+ self._current_stream_name))
+ self._current_stream_files += [[self._current_file_pos,
+ self._current_stream_length - self._current_file_pos,
+ self._current_file_name]]
+ self._current_file_pos = self._current_stream_length
+ def start_new_stream(self, newstreamname='.'):
+ self.finish_current_stream()
+ self.set_current_stream_name(newstreamname)
+ def set_current_stream_name(self, newstreamname):
+ if re.search(r'[\t\n]', newstreamname):
+ raise errors.AssertionError(
+ "Manifest stream names cannot contain whitespace")
+ self._current_stream_name = '.' if newstreamname=='' else newstreamname
+ def current_stream_name(self):
+ return self._current_stream_name
+ def finish_current_stream(self):
+ self.finish_current_file()
+ self.flush_data()
+ if len(self._current_stream_files) == 0:
+ pass
+ elif self._current_stream_name == None:
+ raise errors.AssertionError(
+ "Cannot finish an unnamed stream (%d bytes in %d files)" %
+ (self._current_stream_length, len(self._current_stream_files)))
+ else:
+ if len(self._current_stream_locators) == 0:
+ self._current_stream_locators += [EMPTY_BLOCK_LOCATOR]
+ self._finished_streams += [[self._current_stream_name,
+ self._current_stream_locators,
+ self._current_stream_files]]
+ self._current_stream_files = []
+ self._current_stream_length = 0
+ self._current_stream_locators = []
+ self._current_stream_name = None
+ self._current_file_pos = 0
+ self._current_file_name = None
+ def finish(self):
+ return Keep.put(self.manifest_text())
+ def manifest_text(self):
+ self.finish_current_stream()
+ manifest = ''
+ for stream in self._finished_streams:
+ if not re.search(r'^\.(/.*)?$', stream[0]):
+ manifest += './'
+ manifest += stream[0].replace(' ', '\\040')
+ for locator in stream[1]:
+ manifest += " %s" % locator
+ for sfile in stream[2]:
+ manifest += " %d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040'))
+ manifest += "\n"
+ return manifest
+ def data_locators(self):
+ ret = []
+ for name, locators, files in self._finished_streams:
+ ret += locators
+ return ret
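A minimal sketch of the CollectionWriter interface added here, using the KEEP_LOCAL_STORE fallback from Keep.py (next diff) so no Keep servers are required; it also assumes the lazy api() initialization from commit dd20d6a9 above, and the store path is illustrative:

import os
import arvados

os.environ['KEEP_LOCAL_STORE'] = '/tmp/keep'  # hypothetical local block store
if not os.path.isdir('/tmp/keep'):
    os.makedirs('/tmp/keep')

writer = arvados.CollectionWriter()
writer.start_new_stream('.')
writer.start_new_file('hello.txt')
writer.write('hello world\n')
print(writer.manifest_text())  # ". <md5+12 locator> 0:12:hello.txt"
locator = writer.finish()      # stores the manifest itself in Keep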
diff --git a/sdk/python/arvados/Keep.py b/sdk/python/arvados/Keep.py
new file mode 100644
index 0000000..48673ff
--- /dev/null
+++ b/sdk/python/arvados/Keep.py
@@ -0,0 +1,242 @@
+import gflags
+import httplib
+import httplib2
+import logging
+import os
+import pprint
+import sys
+import types
+import subprocess
+import json
+import UserDict
+import re
+import hashlib
+import string
+import bz2
+import zlib
+import fcntl
+import time
+import threading
+
+global_client_object = None
+
+class Keep:
+ @staticmethod
+ def global_client_object():
+ global global_client_object
+ if global_client_object == None:
+ global_client_object = KeepClient()
+ return global_client_object
+
+ @staticmethod
+ def get(locator, **kwargs):
+ return Keep.global_client_object().get(locator, **kwargs)
+
+ @staticmethod
+ def put(data, **kwargs):
+ return Keep.global_client_object().put(data, **kwargs)
+
+class KeepClient(object):
+
+ class ThreadLimiter(object):
+ """
+ Limit the number of threads running at a given time to
+ {desired successes} minus {successes reported}. When successes
+ reported == desired, wake up the remaining threads and tell
+ them to quit.
+
+ Should be used in a "with" block.
+ """
+ def __init__(self, todo):
+ self._todo = todo
+ self._done = 0
+ self._todo_lock = threading.Semaphore(todo)
+ self._done_lock = threading.Lock()
+ def __enter__(self):
+ self._todo_lock.acquire()
+ return self
+ def __exit__(self, type, value, traceback):
+ self._todo_lock.release()
+ def shall_i_proceed(self):
+ """
+ Return true if the current thread should do stuff. Return
+ false if the current thread should just stop.
+ """
+ with self._done_lock:
+ return (self._done < self._todo)
+ def increment_done(self):
+ """
+ Report that the current thread was successful.
+ """
+ with self._done_lock:
+ self._done += 1
+ def done(self):
+ """
+ Return how many successes were reported.
+ """
+ with self._done_lock:
+ return self._done
+
+ class KeepWriterThread(threading.Thread):
+ """
+ Write a blob of data to the given Keep server. Call
+ increment_done() of the given ThreadLimiter if the write
+ succeeds.
+ """
+ def __init__(self, **kwargs):
+ super(KeepClient.KeepWriterThread, self).__init__()
+ self.args = kwargs
+ def run(self):
+ global config
+ with self.args['thread_limiter'] as limiter:
+ if not limiter.shall_i_proceed():
+ # My turn arrived, but the job has been done without
+ # me.
+ return
+ logging.debug("KeepWriterThread %s proceeding %s %s" %
+ (str(threading.current_thread()),
+ self.args['data_hash'],
+ self.args['service_root']))
+ h = httplib2.Http()
+ url = self.args['service_root'] + self.args['data_hash']
+ api_token = config['ARVADOS_API_TOKEN']
+ headers = {'Authorization': "OAuth2 %s" % api_token}
+ try:
+ resp, content = h.request(url.encode('utf-8'), 'PUT',
+ headers=headers,
+ body=self.args['data'])
+ if (resp['status'] == '401' and
+ re.match(r'Timestamp verification failed', content)):
+ body = KeepClient.sign_for_old_server(
+ self.args['data_hash'],
+ self.args['data'])
+ h = httplib2.Http()
+ resp, content = h.request(url.encode('utf-8'), 'PUT',
+ headers=headers,
+ body=body)
+ if re.match(r'^2\d\d$', resp['status']):
+ logging.debug("KeepWriterThread %s succeeded %s %s" %
+ (str(threading.current_thread()),
+ self.args['data_hash'],
+ self.args['service_root']))
+ return limiter.increment_done()
+ logging.warning("Request fail: PUT %s => %s %s" %
+ (url, resp['status'], content))
+ except (httplib2.HttpLib2Error, httplib.HTTPException) as e:
+ logging.warning("Request fail: PUT %s => %s: %s" %
+ (url, type(e), str(e)))
+
+ def __init__(self):
+ self.lock = threading.Lock()
+ self.service_roots = None
+
+ def shuffled_service_roots(self, hash):
+ if self.service_roots == None:
+ self.lock.acquire()
+ keep_disks = api().keep_disks().list().execute()['items']
+ roots = (("http%s://%s:%d/" %
+ ('s' if f['service_ssl_flag'] else '',
+ f['service_host'],
+ f['service_port']))
+ for f in keep_disks)
+ self.service_roots = sorted(set(roots))
+ logging.debug(str(self.service_roots))
+ self.lock.release()
+ seed = hash
+ pool = self.service_roots[:]
+ pseq = []
+ while len(pool) > 0:
+ if len(seed) < 8:
+ if len(pseq) < len(hash) / 4: # first time around
+ seed = hash[-4:] + hash
+ else:
+ seed += hash
+ probe = int(seed[0:8], 16) % len(pool)
+ pseq += [pool[probe]]
+ pool = pool[:probe] + pool[probe+1:]
+ seed = seed[8:]
+ logging.debug(str(pseq))
+ return pseq
+
+ def get(self, locator):
+ global config
+ if re.search(r',', locator):
+ return ''.join(self.get(x) for x in locator.split(','))
+ if 'KEEP_LOCAL_STORE' in os.environ:
+ return KeepClient.local_store_get(locator)
+ expect_hash = re.sub(r'\+.*', '', locator)
+ for service_root in self.shuffled_service_roots(expect_hash):
+ h = httplib2.Http()
+ url = service_root + expect_hash
+ api_token = config['ARVADOS_API_TOKEN']
+ headers = {'Authorization': "OAuth2 %s" % api_token,
+ 'Accept': 'application/octet-stream'}
+ try:
+ resp, content = h.request(url.encode('utf-8'), 'GET',
+ headers=headers)
+ if re.match(r'^2\d\d$', resp['status']):
+ m = hashlib.new('md5')
+ m.update(content)
+ md5 = m.hexdigest()
+ if md5 == expect_hash:
+ return content
+ logging.warning("Checksum fail: md5(%s) = %s" % (url, md5))
+ except (httplib2.HttpLib2Error, httplib.ResponseNotReady) as e:
+ logging.info("Request fail: GET %s => %s: %s" %
+ (url, type(e), str(e)))
+ raise errors.NotFoundError("Block not found: %s" % expect_hash)
+
+ def put(self, data, **kwargs):
+ if 'KEEP_LOCAL_STORE' in os.environ:
+ return KeepClient.local_store_put(data)
+ m = hashlib.new('md5')
+ m.update(data)
+ data_hash = m.hexdigest()
+ have_copies = 0
+ want_copies = kwargs.get('copies', 2)
+ if not (want_copies > 0):
+ return data_hash
+ threads = []
+ thread_limiter = KeepClient.ThreadLimiter(want_copies)
+ for service_root in self.shuffled_service_roots(data_hash):
+ t = KeepClient.KeepWriterThread(data=data,
+ data_hash=data_hash,
+ service_root=service_root,
+ thread_limiter=thread_limiter)
+ t.start()
+ threads += [t]
+ for t in threads:
+ t.join()
+ have_copies = thread_limiter.done()
+ if have_copies == want_copies:
+ return (data_hash + '+' + str(len(data)))
+ raise errors.KeepWriteError(
+ "Write fail for %s: wanted %d but wrote %d" %
+ (data_hash, want_copies, have_copies))
+
+ @staticmethod
+ def sign_for_old_server(data_hash, data):
+ return (("-----BEGIN PGP SIGNED MESSAGE-----\n\n\n%d %s\n-----BEGIN PGP SIGNATURE-----\n\n-----END PGP SIGNATURE-----\n" % (int(time.time()), data_hash)) + data)
+
+
+ @staticmethod
+ def local_store_put(data):
+ m = hashlib.new('md5')
+ m.update(data)
+ md5 = m.hexdigest()
+ locator = '%s+%d' % (md5, len(data))
+ with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'), 'w') as f:
+ f.write(data)
+ os.rename(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'),
+ os.path.join(os.environ['KEEP_LOCAL_STORE'], md5))
+ return locator
+ @staticmethod
+ def local_store_get(locator):
+ r = re.search('^([0-9a-f]{32,})', locator)
+ if not r:
+ raise errors.NotFoundError(
+ "Invalid data locator: '%s'" % locator)
+ if r.group(0) == EMPTY_BLOCK_LOCATOR.split('+')[0]:
+ return ''
+ with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], r.group(0)), 'r') as f:
+ return f.read()
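A minimal sketch of the put/get round trip through the same KEEP_LOCAL_STORE fallback (directory path illustrative; assumes the lazy api() initialization from commit dd20d6a9 above):

import os
import arvados

os.environ['KEEP_LOCAL_STORE'] = '/tmp/keep'
if not os.path.isdir('/tmp/keep'):
    os.makedirs('/tmp/keep')

# put() returns a "<md5>+<size>" locator; with KEEP_LOCAL_STORE set,
# blocks are stored as plain files named by their md5 hash.
locator = arvados.Keep.put('foo')
print(locator)                    # acbd18db4cc2f85cedef654fccc4a4d8+3
print(arvados.Keep.get(locator))  # foo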
diff --git a/sdk/python/arvados/Stream.py b/sdk/python/arvados/Stream.py
new file mode 100644
index 0000000..b62b359
--- /dev/null
+++ b/sdk/python/arvados/Stream.py
@@ -0,0 +1,203 @@
+import gflags
+import httplib
+import httplib2
+import logging
+import os
+import pprint
+import sys
+import types
+import subprocess
+import json
+import UserDict
+import re
+import hashlib
+import string
+import bz2
+import zlib
+import fcntl
+import time
+import threading
+
+class StreamFileReader(object):
+ def __init__(self, stream, pos, size, name):
+ self._stream = stream
+ self._pos = pos
+ self._size = size
+ self._name = name
+ self._filepos = 0
+ def name(self):
+ return self._name
+ def decompressed_name(self):
+ return re.sub('\.(bz2|gz)$', '', self._name)
+ def size(self):
+ return self._size
+ def stream_name(self):
+ return self._stream.name()
+ def read(self, size, **kwargs):
+ self._stream.seek(self._pos + self._filepos)
+ data = self._stream.read(min(size, self._size - self._filepos))
+ self._filepos += len(data)
+ return data
+ def readall(self, size=2**20, **kwargs):
+ while True:
+ data = self.read(size, **kwargs)
+ if data == '':
+ break
+ yield data
+ def bunzip2(self, size):
+ decompressor = bz2.BZ2Decompressor()
+ for chunk in self.readall(size):
+ data = decompressor.decompress(chunk)
+ if data and data != '':
+ yield data
+ def gunzip(self, size):
+ decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
+ for chunk in self.readall(size):
+ data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
+ if data and data != '':
+ yield data
+ def readall_decompressed(self, size=2**20):
+ self._stream.seek(self._pos + self._filepos)
+ if re.search('\.bz2$', self._name):
+ return self.bunzip2(size)
+ elif re.search('\.gz$', self._name):
+ return self.gunzip(size)
+ else:
+ return self.readall(size)
+ def readlines(self, decompress=True):
+ if decompress:
+ datasource = self.readall_decompressed()
+ else:
+ self._stream.seek(self._pos + self._filepos)
+ datasource = self.readall()
+ data = ''
+ for newdata in datasource:
+ data += newdata
+ sol = 0
+ while True:
+ eol = string.find(data, "\n", sol)
+ if eol < 0:
+ break
+ yield data[sol:eol+1]
+ sol = eol+1
+ data = data[sol:]
+ if data != '':
+ yield data
+ def as_manifest(self):
+ if self.size() == 0:
+ return ("%s %s 0:0:%s\n"
+ % (self._stream.name(), EMPTY_BLOCK_LOCATOR, self.name()))
+ return string.join(self._stream.tokens_for_range(self._pos, self._size),
+ " ") + "\n"
+
+class StreamReader(object):
+ def __init__(self, tokens):
+ self._tokens = tokens
+ self._current_datablock_data = None
+ self._current_datablock_pos = 0
+ self._current_datablock_index = -1
+ self._pos = 0
+
+ self._stream_name = None
+ self.data_locators = []
+ self.files = []
+
+ for tok in self._tokens:
+ if self._stream_name == None:
+ self._stream_name = tok.replace('\\040', ' ')
+ elif re.search(r'^[0-9a-f]{32}(\+\S+)*$', tok):
+ self.data_locators += [tok]
+ elif re.search(r'^\d+:\d+:\S+', tok):
+ pos, size, name = tok.split(':',2)
+ self.files += [[int(pos), int(size), name.replace('\\040', ' ')]]
+ else:
+ raise errors.SyntaxError("Invalid manifest format")
+
+ def tokens(self):
+ return self._tokens
+ def tokens_for_range(self, range_start, range_size):
+ resp = [self._stream_name]
+ return_all_tokens = False
+ block_start = 0
+ token_bytes_skipped = 0
+ for locator in self.data_locators:
+ sizehint = re.search(r'\+(\d+)', locator)
+ if not sizehint:
+ return_all_tokens = True
+ if return_all_tokens:
+ resp += [locator]
+ next
+ blocksize = int(sizehint.group(0))
+ if range_start + range_size <= block_start:
+ break
+ if range_start < block_start + blocksize:
+ resp += [locator]
+ else:
+ token_bytes_skipped += blocksize
+ block_start += blocksize
+ for f in self.files:
+ if ((f[0] < range_start + range_size)
+ and
+ (f[0] + f[1] > range_start)
+ and
+ f[1] > 0):
+ resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
+ return resp
+ def name(self):
+ return self._stream_name
+ def all_files(self):
+ for f in self.files:
+ pos, size, name = f
+ yield StreamFileReader(self, pos, size, name)
+ def nextdatablock(self):
+ if self._current_datablock_index < 0:
+ self._current_datablock_pos = 0
+ self._current_datablock_index = 0
+ else:
+ self._current_datablock_pos += self.current_datablock_size()
+ self._current_datablock_index += 1
+ self._current_datablock_data = None
+ def current_datablock_data(self):
+ if self._current_datablock_data == None:
+ self._current_datablock_data = Keep.get(self.data_locators[self._current_datablock_index])
+ return self._current_datablock_data
+ def current_datablock_size(self):
+ if self._current_datablock_index < 0:
+ self.nextdatablock()
+ sizehint = re.search('\+(\d+)', self.data_locators[self._current_datablock_index])
+ if sizehint:
+ return int(sizehint.group(0))
+ return len(self.current_datablock_data())
+ def seek(self, pos):
+ """Set the position of the next read operation."""
+ self._pos = pos
+ def really_seek(self):
+ """Find and load the appropriate data block, so the byte at
+ _pos is in memory.
+ """
+ if self._pos == self._current_datablock_pos:
+ return True
+ if (self._current_datablock_pos != None and
+ self._pos >= self._current_datablock_pos and
+ self._pos <= self._current_datablock_pos + self.current_datablock_size()):
+ return True
+ if self._pos < self._current_datablock_pos:
+ self._current_datablock_index = -1
+ self.nextdatablock()
+ while (self._pos > self._current_datablock_pos and
+ self._pos > self._current_datablock_pos + self.current_datablock_size()):
+ self.nextdatablock()
+ def read(self, size):
+ """Read no more than size bytes -- but at least one byte,
+ unless _pos is already at the end of the stream.
+ """
+ if size == 0:
+ return ''
+ self.really_seek()
+ while self._pos >= self._current_datablock_pos + self.current_datablock_size():
+ self.nextdatablock()
+ if self._current_datablock_index >= len(self.data_locators):
+ return None
+ data = self.current_datablock_data()[self._pos - self._current_datablock_pos : self._pos - self._current_datablock_pos + size]
+ self._pos += len(data)
+ return data
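StreamReader parses one manifest stream line into a stream name, block locators, and file tokens, so listing files needs no Keep access at all. A minimal sketch (the tokens are illustrative; acbd18db... is the md5 of the 3-byte block "foo"):

import arvados

tokens = ['.', 'acbd18db4cc2f85cedef654fccc4a4d8+3', '0:3:foo.txt']
s = arvados.StreamReader(tokens)
for f in s.all_files():
    print('%s/%s, %d bytes' % (f.stream_name(), f.name(), f.size()))
# prints: ./foo.txt, 3 bytes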
diff --git a/sdk/python/arvados/__init__.py b/sdk/python/arvados/__init__.py
index dacdba8..b99389a 100644
--- a/sdk/python/arvados/__init__.py
+++ b/sdk/python/arvados/__init__.py
@@ -21,6 +21,11 @@ import threading
import apiclient
import apiclient.discovery
+from Stream import *
+from Collection import *
+from Keep import *
+
+
# Arvados configuration settings are taken from $HOME/.config/arvados.
# Environment variables override settings in the config file.
#
@@ -518,583 +523,7 @@ class util:
allfiles += [ent_base]
return allfiles
-class StreamFileReader(object):
- def __init__(self, stream, pos, size, name):
- self._stream = stream
- self._pos = pos
- self._size = size
- self._name = name
- self._filepos = 0
- def name(self):
- return self._name
- def decompressed_name(self):
- return re.sub('\.(bz2|gz)$', '', self._name)
- def size(self):
- return self._size
- def stream_name(self):
- return self._stream.name()
- def read(self, size, **kwargs):
- self._stream.seek(self._pos + self._filepos)
- data = self._stream.read(min(size, self._size - self._filepos))
- self._filepos += len(data)
- return data
- def readall(self, size=2**20, **kwargs):
- while True:
- data = self.read(size, **kwargs)
- if data == '':
- break
- yield data
- def bunzip2(self, size):
- decompressor = bz2.BZ2Decompressor()
- for chunk in self.readall(size):
- data = decompressor.decompress(chunk)
- if data and data != '':
- yield data
- def gunzip(self, size):
- decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
- for chunk in self.readall(size):
- data = decompressor.decompress(decompressor.unconsumed_tail + chunk)
- if data and data != '':
- yield data
- def readall_decompressed(self, size=2**20):
- self._stream.seek(self._pos + self._filepos)
- if re.search('\.bz2$', self._name):
- return self.bunzip2(size)
- elif re.search('\.gz$', self._name):
- return self.gunzip(size)
- else:
- return self.readall(size)
- def readlines(self, decompress=True):
- if decompress:
- datasource = self.readall_decompressed()
- else:
- self._stream.seek(self._pos + self._filepos)
- datasource = self.readall()
- data = ''
- for newdata in datasource:
- data += newdata
- sol = 0
- while True:
- eol = string.find(data, "\n", sol)
- if eol < 0:
- break
- yield data[sol:eol+1]
- sol = eol+1
- data = data[sol:]
- if data != '':
- yield data
- def as_manifest(self):
- if self.size() == 0:
- return ("%s %s 0:0:%s\n"
- % (self._stream.name(), EMPTY_BLOCK_LOCATOR, self.name()))
- return string.join(self._stream.tokens_for_range(self._pos, self._size),
- " ") + "\n"
-
-class StreamReader(object):
- def __init__(self, tokens):
- self._tokens = tokens
- self._current_datablock_data = None
- self._current_datablock_pos = 0
- self._current_datablock_index = -1
- self._pos = 0
-
- self._stream_name = None
- self.data_locators = []
- self.files = []
-
- for tok in self._tokens:
- if self._stream_name == None:
- self._stream_name = tok.replace('\\040', ' ')
- elif re.search(r'^[0-9a-f]{32}(\+\S+)*$', tok):
- self.data_locators += [tok]
- elif re.search(r'^\d+:\d+:\S+', tok):
- pos, size, name = tok.split(':',2)
- self.files += [[int(pos), int(size), name.replace('\\040', ' ')]]
- else:
- raise errors.SyntaxError("Invalid manifest format")
-
- def tokens(self):
- return self._tokens
- def tokens_for_range(self, range_start, range_size):
- resp = [self._stream_name]
- return_all_tokens = False
- block_start = 0
- token_bytes_skipped = 0
- for locator in self.data_locators:
- sizehint = re.search(r'\+(\d+)', locator)
- if not sizehint:
- return_all_tokens = True
- if return_all_tokens:
- resp += [locator]
- next
- blocksize = int(sizehint.group(0))
- if range_start + range_size <= block_start:
- break
- if range_start < block_start + blocksize:
- resp += [locator]
- else:
- token_bytes_skipped += blocksize
- block_start += blocksize
- for f in self.files:
- if ((f[0] < range_start + range_size)
- and
- (f[0] + f[1] > range_start)
- and
- f[1] > 0):
- resp += ["%d:%d:%s" % (f[0] - token_bytes_skipped, f[1], f[2])]
- return resp
- def name(self):
- return self._stream_name
- def all_files(self):
- for f in self.files:
- pos, size, name = f
- yield StreamFileReader(self, pos, size, name)
- def nextdatablock(self):
- if self._current_datablock_index < 0:
- self._current_datablock_pos = 0
- self._current_datablock_index = 0
- else:
- self._current_datablock_pos += self.current_datablock_size()
- self._current_datablock_index += 1
- self._current_datablock_data = None
- def current_datablock_data(self):
- if self._current_datablock_data == None:
- self._current_datablock_data = Keep.get(self.data_locators[self._current_datablock_index])
- return self._current_datablock_data
- def current_datablock_size(self):
- if self._current_datablock_index < 0:
- self.nextdatablock()
- sizehint = re.search('\+(\d+)', self.data_locators[self._current_datablock_index])
- if sizehint:
- return int(sizehint.group(0))
- return len(self.current_datablock_data())
- def seek(self, pos):
- """Set the position of the next read operation."""
- self._pos = pos
- def really_seek(self):
- """Find and load the appropriate data block, so the byte at
- _pos is in memory.
- """
- if self._pos == self._current_datablock_pos:
- return True
- if (self._current_datablock_pos != None and
- self._pos >= self._current_datablock_pos and
- self._pos <= self._current_datablock_pos + self.current_datablock_size()):
- return True
- if self._pos < self._current_datablock_pos:
- self._current_datablock_index = -1
- self.nextdatablock()
- while (self._pos > self._current_datablock_pos and
- self._pos > self._current_datablock_pos + self.current_datablock_size()):
- self.nextdatablock()
- def read(self, size):
- """Read no more than size bytes -- but at least one byte,
- unless _pos is already at the end of the stream.
- """
- if size == 0:
- return ''
- self.really_seek()
- while self._pos >= self._current_datablock_pos + self.current_datablock_size():
- self.nextdatablock()
- if self._current_datablock_index >= len(self.data_locators):
- return None
- data = self.current_datablock_data()[self._pos - self._current_datablock_pos : self._pos - self._current_datablock_pos + size]
- self._pos += len(data)
- return data
-
-class CollectionReader(object):
- def __init__(self, manifest_locator_or_text):
- if re.search(r'^\S+( [a-f0-9]{32,}(\+\S+)*)+( \d+:\d+:\S+)+\n', manifest_locator_or_text):
- self._manifest_text = manifest_locator_or_text
- self._manifest_locator = None
- else:
- self._manifest_locator = manifest_locator_or_text
- self._manifest_text = None
- self._streams = None
- def __enter__(self):
- pass
- def __exit__(self):
- pass
- def _populate(self):
- if self._streams != None:
- return
- if not self._manifest_text:
- self._manifest_text = Keep.get(self._manifest_locator)
- self._streams = []
- for stream_line in self._manifest_text.split("\n"):
- if stream_line != '':
- stream_tokens = stream_line.split()
- self._streams += [stream_tokens]
- def all_streams(self):
- self._populate()
- resp = []
- for s in self._streams:
- resp += [StreamReader(s)]
- return resp
- def all_files(self):
- for s in self.all_streams():
- for f in s.all_files():
- yield f
- def manifest_text(self):
- self._populate()
- return self._manifest_text
-
-class CollectionWriter(object):
- KEEP_BLOCK_SIZE = 2**26
- def __init__(self):
- self._data_buffer = []
- self._data_buffer_len = 0
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = '.'
- self._current_file_name = None
- self._current_file_pos = 0
- self._finished_streams = []
- def __enter__(self):
- pass
- def __exit__(self):
- self.finish()
- def write_directory_tree(self,
- path, stream_name='.', max_manifest_depth=-1):
- self.start_new_stream(stream_name)
- todo = []
- if max_manifest_depth == 0:
- dirents = sorted(util.listdir_recursive(path))
- else:
- dirents = sorted(os.listdir(path))
- for dirent in dirents:
- target = os.path.join(path, dirent)
- if os.path.isdir(target):
- todo += [[target,
- os.path.join(stream_name, dirent),
- max_manifest_depth-1]]
- else:
- self.start_new_file(dirent)
- with open(target, 'rb') as f:
- while True:
- buf = f.read(2**26)
- if len(buf) == 0:
- break
- self.write(buf)
- self.finish_current_stream()
- map(lambda x: self.write_directory_tree(*x), todo)
-
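A usage sketch for write_directory_tree(): with max_manifest_depth=-1 (the default), each subdirectory becomes its own stream via the recursive calls queued in `todo`; with 0, util.listdir_recursive flattens everything into one stream. Setting KEEP_LOCAL_STORE makes this runnable without a cluster; both paths below are assumptions:

    import os
    import arvados

    os.environ['KEEP_LOCAL_STORE'] = '/tmp/keep'   # assumed to exist
    writer = arvados.CollectionWriter()
    writer.write_directory_tree('/tmp/indir')      # assumed input directory
    print writer.manifest_text()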
- def write(self, newdata):
- if hasattr(newdata, '__iter__'):
- for s in newdata:
- self.write(s)
- return
- self._data_buffer += [newdata]
- self._data_buffer_len += len(newdata)
- self._current_stream_length += len(newdata)
- while self._data_buffer_len >= self.KEEP_BLOCK_SIZE:
- self.flush_data()
- def flush_data(self):
- data_buffer = ''.join(self._data_buffer)
- if data_buffer != '':
- self._current_stream_locators += [Keep.put(data_buffer[0:self.KEEP_BLOCK_SIZE])]
- self._data_buffer = [data_buffer[self.KEEP_BLOCK_SIZE:]]
- self._data_buffer_len = len(self._data_buffer[0])
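write() only accumulates; flush_data() ships at most one KEEP_BLOCK_SIZE (2**26, i.e. 64 MiB) chunk per call and carries the remainder forward, which is why write() loops while the buffer is over the threshold. The same arithmetic with a toy block size:

    # Sketch of one flush_data() pass with an 8-byte block size.
    BLOCK = 8
    data = ''.join(['abcdefghij'])   # 10 bytes buffered
    shipped = data[:BLOCK]           # 'abcdefgh' -> would go to Keep.put()
    carried = [data[BLOCK:]]         # ['ij'] stays in the buffer
    assert shipped == 'abcdefgh' and carried == ['ij']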
- def start_new_file(self, newfilename=None):
- self.finish_current_file()
- self.set_current_file_name(newfilename)
- def set_current_file_name(self, newfilename):
- if re.search(r'[\t\n]', newfilename):
- raise errors.AssertionError(
- "Manifest filenames cannot contain tab or newline characters: %s" %
- newfilename)
- self._current_file_name = newfilename
- def current_file_name(self):
- return self._current_file_name
- def finish_current_file(self):
- if self._current_file_name == None:
- if self._current_file_pos == self._current_stream_length:
- return
- raise errors.AssertionError(
- "Cannot finish an unnamed file " +
- "(%d bytes at offset %d in '%s' stream)" %
- (self._current_stream_length - self._current_file_pos,
- self._current_file_pos,
- self._current_stream_name))
- self._current_stream_files += [[self._current_file_pos,
- self._current_stream_length - self._current_file_pos,
- self._current_file_name]]
- self._current_file_pos = self._current_stream_length
- def start_new_stream(self, newstreamname='.'):
- self.finish_current_stream()
- self.set_current_stream_name(newstreamname)
- def set_current_stream_name(self, newstreamname):
- if re.search(r'[\t\n]', newstreamname):
- raise errors.AssertionError(
- "Manifest stream names cannot contain tab or newline characters")
- self._current_stream_name = '.' if newstreamname=='' else newstreamname
- def current_stream_name(self):
- return self._current_stream_name
- def finish_current_stream(self):
- self.finish_current_file()
- self.flush_data()
- if len(self._current_stream_files) == 0:
- pass
- elif self._current_stream_name == None:
- raise errors.AssertionError(
- "Cannot finish an unnamed stream (%d bytes in %d files)" %
- (self._current_stream_length, len(self._current_stream_files)))
- else:
- if len(self._current_stream_locators) == 0:
- self._current_stream_locators += [EMPTY_BLOCK_LOCATOR]
- self._finished_streams += [[self._current_stream_name,
- self._current_stream_locators,
- self._current_stream_files]]
- self._current_stream_files = []
- self._current_stream_length = 0
- self._current_stream_locators = []
- self._current_stream_name = None
- self._current_file_pos = 0
- self._current_file_name = None
- def finish(self):
- return Keep.put(self.manifest_text())
- def manifest_text(self):
- self.finish_current_stream()
- manifest = ''
- for stream in self._finished_streams:
- if not re.search(r'^\.(/.*)?$', stream[0]):
- manifest += './'
- manifest += stream[0].replace(' ', '\\040')
- for locator in stream[1]:
- manifest += " %s" % locator
- for sfile in stream[2]:
- manifest += " %d:%d:%s" % (sfile[0], sfile[1], sfile[2].replace(' ', '\\040'))
- manifest += "\n"
- return manifest
- def data_locators(self):
- ret = []
- for name, locators, files in self._finished_streams:
- ret += locators
- return ret
-
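manifest_text() serializes each finished stream as one line: stream names are forced under './' and spaces in names become \040 (spaces are allowed in names; only tab and newline are rejected above). A sketch of the per-stream serialization:

    # Sketch: serializing one finished stream, as manifest_text() does.
    stream = ['data dir',
              ['acbd18db4cc2f85cedef654fccc4a4d8+3'],
              [[0, 3, 'my file.txt']]]
    line = './' + stream[0].replace(' ', '\\040')
    for locator in stream[1]:
        line += ' %s' % locator
    for pos, size, name in stream[2]:
        line += ' %d:%d:%s' % (pos, size, name.replace(' ', '\\040'))
    print line
    # ./data\040dir acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:my\040file.txt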
-global_client_object = None
-
-class Keep:
- @staticmethod
- def global_client_object():
- global global_client_object
- if global_client_object == None:
- global_client_object = KeepClient()
- return global_client_object
-
- @staticmethod
- def get(locator, **kwargs):
- return Keep.global_client_object().get(locator, **kwargs)
- @staticmethod
- def put(data, **kwargs):
- return Keep.global_client_object().put(data, **kwargs)
-
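Keep is a thin facade over a lazily created module-level KeepClient, so callers can use the class methods without managing a client object. With KEEP_LOCAL_STORE set, get/put short-circuit to the local-store helpers at the bottom of KeepClient, which makes a round trip testable offline (the path is an assumption):

    import os
    import arvados

    os.environ['KEEP_LOCAL_STORE'] = '/tmp/keep'   # assumed to exist
    locator = arvados.Keep.put('hello')            # '<md5>+5'
    assert arvados.Keep.get(locator) == 'hello'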
-class KeepClient(object):
-
- class ThreadLimiter(object):
- """
- Limit the number of threads running at a given time to
- {desired successes} minus {successes reported}. When successes
- reported == desired, wake up the remaining threads and tell
- them to quit.
-
- Should be used in a "with" block.
- """
- def __init__(self, todo):
- self._todo = todo
- self._done = 0
- self._todo_lock = threading.Semaphore(todo)
- self._done_lock = threading.Lock()
- def __enter__(self):
- self._todo_lock.acquire()
- return self
- def __exit__(self, type, value, traceback):
- self._todo_lock.release()
- def shall_i_proceed(self):
- """
- Return true if the current thread should do stuff. Return
- false if the current thread should just stop.
- """
- with self._done_lock:
- return (self._done < self._todo)
- def increment_done(self):
- """
- Report that the current thread was successful.
- """
- with self._done_lock:
- self._done += 1
- def done(self):
- """
- Return how many successes were reported.
- """
- with self._done_lock:
- return self._done
-
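The intended ThreadLimiter protocol, sketched below: every worker enters the limiter, asks shall_i_proceed(), and reports success with increment_done(). The semaphore caps concurrency at `todo`; later arrivals see enough successes and bow out. (The check and the increment are separate lock acquisitions, so in principle a few extra successes can slip through.)

    import threading

    limiter = KeepClient.ThreadLimiter(2)

    def worker():
        with limiter:
            if not limiter.shall_i_proceed():
                return            # enough successes already reported
            # ... attempt the write here ...
            limiter.increment_done()

    threads = [threading.Thread(target=worker) for _ in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print limiter.done()          # normally 2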
- class KeepWriterThread(threading.Thread):
- """
- Write a blob of data to the given Keep server. Call
- increment_done() of the given ThreadLimiter if the write
- succeeds.
- """
- def __init__(self, **kwargs):
- super(KeepClient.KeepWriterThread, self).__init__()
- self.args = kwargs
- def run(self):
- global config
- with self.args['thread_limiter'] as limiter:
- if not limiter.shall_i_proceed():
- # My turn arrived, but the job has been done without
- # me.
- return
- logging.debug("KeepWriterThread %s proceeding %s %s" %
- (str(threading.current_thread()),
- self.args['data_hash'],
- self.args['service_root']))
- h = httplib2.Http()
- url = self.args['service_root'] + self.args['data_hash']
- api_token = config['ARVADOS_API_TOKEN']
- headers = {'Authorization': "OAuth2 %s" % api_token}
- try:
- resp, content = h.request(url.encode('utf-8'), 'PUT',
- headers=headers,
- body=self.args['data'])
- if (resp['status'] == '401' and
- re.match(r'Timestamp verification failed', content)):
- body = KeepClient.sign_for_old_server(
- self.args['data_hash'],
- self.args['data'])
- h = httplib2.Http()
- resp, content = h.request(url.encode('utf-8'), 'PUT',
- headers=headers,
- body=body)
- if re.match(r'^2\d\d$', resp['status']):
- logging.debug("KeepWriterThread %s succeeded %s %s" %
- (str(threading.current_thread()),
- self.args['data_hash'],
- self.args['service_root']))
- return limiter.increment_done()
- logging.warning("Request fail: PUT %s => %s %s" %
- (url, resp['status'], content))
- except (httplib2.HttpLib2Error, httplib.HTTPException) as e:
- logging.warning("Request fail: PUT %s => %s: %s" %
- (url, type(e), str(e)))
-
- def __init__(self):
- self.lock = threading.Lock()
- self.service_roots = None
-
- def shuffled_service_roots(self, hash):
- if self.service_roots is None:
- # Take the lock, then re-check: another thread may have populated
- # service_roots while we waited. "with" also releases the lock if
- # the API call raises, instead of leaving it held forever.
- with self.lock:
- if self.service_roots is None:
- keep_disks = api().keep_disks().list().execute()['items']
- roots = (("http%s://%s:%d/" %
- ('s' if f['service_ssl_flag'] else '',
- f['service_host'],
- f['service_port']))
- for f in keep_disks)
- self.service_roots = sorted(set(roots))
- logging.debug(str(self.service_roots))
- seed = hash
- pool = self.service_roots[:]
- pseq = []
- while len(pool) > 0:
- if len(seed) < 8:
- if len(pseq) < len(hash) / 4: # first time around
- seed = hash[-4:] + hash
- else:
- seed += hash
- probe = int(seed[0:8], 16) % len(pool)
- pseq += [pool[probe]]
- pool = pool[:probe] + pool[probe+1:]
- seed = seed[8:]
- logging.debug(str(pseq))
- return pseq
-
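shuffled_service_roots() computes a deterministic, hash-keyed permutation of the sorted server list: successive 8-hex-digit windows of the block hash pick the next server from the shrinking pool, so every client probes servers in the same order for a given block, while different blocks spread load across servers. The same algorithm as a standalone sketch:

    # Sketch: the probe-order permutation used above.
    def probe_order(hash, pool):
        pool = pool[:]
        seed = hash
        pseq = []
        while pool:
            if len(seed) < 8:
                if len(pseq) < len(hash) // 4:   # still on the first pass
                    seed = hash[-4:] + hash
                else:
                    seed += hash
            probe = int(seed[:8], 16) % len(pool)
            pseq.append(pool.pop(probe))
            seed = seed[8:]
        return pseq

    print probe_order('acbd18db4cc2f85cedef654fccc4a4d8',
                      ['http://keep%d:25107/' % n for n in range(4)])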
- def get(self, locator):
- global config
- if re.search(r',', locator):
- return ''.join(self.get(x) for x in locator.split(','))
- if 'KEEP_LOCAL_STORE' in os.environ:
- return KeepClient.local_store_get(locator)
- expect_hash = re.sub(r'\+.*', '', locator)
- for service_root in self.shuffled_service_roots(expect_hash):
- h = httplib2.Http()
- url = service_root + expect_hash
- api_token = config['ARVADOS_API_TOKEN']
- headers = {'Authorization': "OAuth2 %s" % api_token,
- 'Accept': 'application/octet-stream'}
- try:
- resp, content = h.request(url.encode('utf-8'), 'GET',
- headers=headers)
- if re.match(r'^2\d\d$', resp['status']):
- m = hashlib.new('md5')
- m.update(content)
- md5 = m.hexdigest()
- if md5 == expect_hash:
- return content
- logging.warning("Checksum fail: md5(%s) = %s" % (url, md5))
- except (httplib2.HttpLib2Error, httplib.ResponseNotReady) as e:
- logging.info("Request fail: GET %s => %s: %s" %
- (url, type(e), str(e)))
- raise errors.NotFoundError("Block not found: %s" % expect_hash)
-
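get() trusts no server: the md5 of every response body must equal the bare hash from the locator (hints such as +size are stripped first), otherwise the next server in the probe order is tried. The check in isolation:

    import hashlib
    import re

    locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3'
    content = 'foo'
    expect_hash = re.sub(r'\+.*', '', locator)
    assert hashlib.md5(content).hexdigest() == expect_hash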
- def put(self, data, **kwargs):
- if 'KEEP_LOCAL_STORE' in os.environ:
- return KeepClient.local_store_put(data)
- m = hashlib.new('md5')
- m.update(data)
- data_hash = m.hexdigest()
- have_copies = 0
- want_copies = kwargs.get('copies', 2)
- if not (want_copies > 0):
- return data_hash
- threads = []
- thread_limiter = KeepClient.ThreadLimiter(want_copies)
- for service_root in self.shuffled_service_roots(data_hash):
- t = KeepClient.KeepWriterThread(data=data,
- data_hash=data_hash,
- service_root=service_root,
- thread_limiter=thread_limiter)
- t.start()
- threads += [t]
- for t in threads:
- t.join()
- have_copies = thread_limiter.done()
- if have_copies == want_copies:
- return (data_hash + '+' + str(len(data)))
- raise errors.KeepWriteError(
- "Write fail for %s: wanted %d but wrote %d" %
- (data_hash, want_copies, have_copies))
-
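A usage sketch for put(): the copies keyword sets the desired replication (default 2), the returned locator is '<md5>+<size>', and falling short raises KeepWriteError. This one needs a reachable cluster (or KEEP_LOCAL_STORE, under which replication is moot):

    import arvados

    locator = arvados.Keep.put('important data', copies=3)
    print locator   # '<32 hex digits>+14'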
- @staticmethod
- def sign_for_old_server(data_hash, data):
- return (("-----BEGIN PGP SIGNED MESSAGE-----\n\n\n%d %s\n-----BEGIN PGP SIGNATURE-----\n\n-----END PGP SIGNATURE-----\n" % (int(time.time()), data_hash)) + data)
-
-
- @staticmethod
- def local_store_put(data):
- m = hashlib.new('md5')
- m.update(data)
- md5 = m.hexdigest()
- locator = '%s+%d' % (md5, len(data))
- with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'), 'w') as f:
- f.write(data)
- os.rename(os.path.join(os.environ['KEEP_LOCAL_STORE'], md5 + '.tmp'),
- os.path.join(os.environ['KEEP_LOCAL_STORE'], md5))
- return locator
- @staticmethod
- def local_store_get(locator):
- r = re.search('^([0-9a-f]{32,})', locator)
- if not r:
- raise errors.NotFoundError(
- "Invalid data locator: '%s'" % locator)
- if r.group(0) == EMPTY_BLOCK_LOCATOR.split('+')[0]:
- return ''
- with open(os.path.join(os.environ['KEEP_LOCAL_STORE'], r.group(0)), 'r') as f:
- return f.read()
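Two details of the local store worth noting: blocks are written to '<md5>.tmp' and renamed into place, so a concurrent reader never observes a partial block, and local_store_get() special-cases the empty-block locator so it never touches disk. Assuming EMPTY_BLOCK_LOCATOR (defined earlier in this file) is the md5 of the empty string with a +0 hint, the parse looks like:

    import re

    EMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'  # assumed value
    for loc in ['acbd18db4cc2f85cedef654fccc4a4d8+3', EMPTY_BLOCK_LOCATOR]:
        r = re.search('^([0-9a-f]{32,})', loc)
        print r.group(0), r.group(0) == EMPTY_BLOCK_LOCATOR.split('+')[0]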
# We really shouldn't do this but some clients still use
# arvados.service.* directly instead of arvados.api().*