[ARVADOS] created: 3955a9928265989222fdb7dfbaf1ad369a725123
Git user git at public.curoverse.com
Wed Jun 7 15:54:11 EDT 2017
at 3955a9928265989222fdb7dfbaf1ad369a725123 (commit)
commit 3955a9928265989222fdb7dfbaf1ad369a725123
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jun 6 16:22:02 2017 -0400
11345: Fix unit tests after refactoring error types.
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curoverse.com>
diff --git a/services/nodemanager/arvnodeman/computenode/__init__.py b/services/nodemanager/arvnodeman/computenode/__init__.py
index 7cf9d63..b11b2de 100644
--- a/services/nodemanager/arvnodeman/computenode/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/__init__.py
@@ -8,6 +8,7 @@ import itertools
import re
import time
+from ..config import CLOUD_ERRORS
from libcloud.common.exceptions import BaseHTTPError
ARVADOS_TIMEFMT = '%Y-%m-%dT%H:%M:%SZ'
@@ -86,7 +87,7 @@ class RetryMixin(object):
pass
if error.code == 429 or error.code >= 500:
should_retry = True
- elif isinstance(error, errors):
+ elif isinstance(error, CLOUD_ERRORS) or isinstance(error, errors) or type(error) is Exception:
should_retry = True
if not should_retry:
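For reference, the combined retry predicate after this change can be read as a small standalone sketch (illustrative only; is_retryable and the sample calls below are not part of the commit):

    def is_retryable(error, errors, cloud_errors):
        # Retry on the configured cloud/network error types, the
        # caller-supplied error tuple, and bare Exception instances,
        # which some libcloud compute drivers raise for API errors.
        return (isinstance(error, cloud_errors)
                or isinstance(error, errors)
                or type(error) is Exception)

    # A bare Exception from a driver is retried; an ordinary code bug is not.
    assert is_retryable(Exception("driver error"), (), (IOError,))
    assert not is_retryable(ValueError("code bug"), (), (IOError,))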
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
index 8a397dc..4463ec6 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
@@ -296,6 +296,7 @@ class ComputeNodeUpdateActor(config.actor_class, RetryMixin):
RetryMixin.__init__(self, 1, max_retry_wait,
None, cloud_factory(), timer_actor)
self._cloud = cloud_factory()
+ self._later = self.actor_ref.tell_proxy()
def _set_logger(self):
self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
index 0c8ddc2..11cc4e5 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/slurm.py
@@ -39,7 +39,7 @@ class ComputeNodeShutdownActor(SlurmMixin, ShutdownActorBase):
self._logger.info("Draining SLURM node %s", self._nodename)
self._later.issue_slurm_drain()
- @RetryMixin._retry((subprocess.CalledProcessError,))
+ @RetryMixin._retry((subprocess.CalledProcessError, OSError))
def cancel_shutdown(self, reason, try_resume=True):
if self._nodename:
if try_resume and self._get_slurm_state(self._nodename) in self.SLURM_DRAIN_STATES:
@@ -51,7 +51,7 @@ class ComputeNodeShutdownActor(SlurmMixin, ShutdownActorBase):
pass
return super(ComputeNodeShutdownActor, self).cancel_shutdown(reason)
- @RetryMixin._retry((subprocess.CalledProcessError,))
+ @RetryMixin._retry((subprocess.CalledProcessError, OSError))
def issue_slurm_drain(self):
if self.cancel_reason is not None:
return
@@ -62,7 +62,7 @@ class ComputeNodeShutdownActor(SlurmMixin, ShutdownActorBase):
else:
self._later.shutdown_node()
- @RetryMixin._retry((subprocess.CalledProcessError,))
+ @RetryMixin._retry((subprocess.CalledProcessError, OSError))
def await_slurm_drain(self):
if self.cancel_reason is not None:
return
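These decorators now retry on OSError (e.g. a failed exec of scontrol) as well as on a non-zero exit status. Below is a minimal, simplified stand-in for RetryMixin._retry showing how the exception-type tuple drives the retry decision (the real mixin schedules retries through a timer actor instead of sleeping; retry_on and drain_node are illustrative names only):

    import functools
    import subprocess
    import time

    def retry_on(exc_types, wait=1, attempts=3):
        # Simplified sketch: call the wrapped function again whenever it
        # raises one of the listed exception types, up to `attempts` times.
        def decorator(orig_func):
            @functools.wraps(orig_func)
            def wrapper(*args, **kwargs):
                for attempt in range(attempts):
                    try:
                        return orig_func(*args, **kwargs)
                    except exc_types:
                        if attempt == attempts - 1:
                            raise
                        time.sleep(wait)
            return wrapper
        return decorator

    @retry_on((subprocess.CalledProcessError, OSError))
    def drain_node(nodename):
        # OSError: scontrol missing or not executable.
        # CalledProcessError: scontrol exited non-zero.
        subprocess.check_call(
            ["scontrol", "update", "NodeName=" + nodename, "State=DRAIN"])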
diff --git a/services/nodemanager/arvnodeman/computenode/driver/__init__.py b/services/nodemanager/arvnodeman/computenode/driver/__init__.py
index 6d23c2b..c8c54dc 100644
--- a/services/nodemanager/arvnodeman/computenode/driver/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/driver/__init__.py
@@ -6,10 +6,9 @@ import logging
from operator import attrgetter
import libcloud.common.types as cloud_types
-from libcloud.common.exceptions import BaseHTTPError
from libcloud.compute.base import NodeDriver, NodeAuthSSHKey
-from ...config import NETWORK_ERRORS
+from ...config import CLOUD_ERRORS
from .. import RetryMixin
class BaseComputeNodeDriver(RetryMixin):
@@ -25,7 +24,7 @@ class BaseComputeNodeDriver(RetryMixin):
Subclasses must implement arvados_create_kwargs, sync_node,
node_fqdn, and node_start_time.
"""
- CLOUD_ERRORS = NETWORK_ERRORS + (cloud_types.LibcloudError,)
+
@RetryMixin._retry()
def _create_driver(self, driver_class, **auth_kwargs):
@@ -169,7 +168,7 @@ class BaseComputeNodeDriver(RetryMixin):
kwargs.update(self.arvados_create_kwargs(size, arvados_node))
kwargs['size'] = size
return self.real.create_node(**kwargs)
- except self.CLOUD_ERRORS as create_error:
+ except CLOUD_ERRORS as create_error:
# Workaround for bug #6702: sometimes the create node request
# succeeds but times out and raises an exception instead of
# returning a result. If this happens, we get stuck in a retry
@@ -209,7 +208,7 @@ class BaseComputeNodeDriver(RetryMixin):
def destroy_node(self, cloud_node):
try:
return self.real.destroy_node(cloud_node)
- except self.CLOUD_ERRORS as destroy_error:
+ except CLOUD_ERRORS as destroy_error:
# Sometimes the destroy node request succeeds but times out and
# raises an exception instead of returning success. If this
# happens, we get a noisy stack trace. Check if the node is still
diff --git a/services/nodemanager/arvnodeman/computenode/driver/azure.py b/services/nodemanager/arvnodeman/computenode/driver/azure.py
index 6e7392a..c707c2a 100644
--- a/services/nodemanager/arvnodeman/computenode/driver/azure.py
+++ b/services/nodemanager/arvnodeman/computenode/driver/azure.py
@@ -17,7 +17,6 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.AZURE_ARM)
SEARCH_CACHE = {}
- CLOUD_ERRORS = BaseComputeNodeDriver.CLOUD_ERRORS
def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
driver_class=DEFAULT_DRIVER):
diff --git a/services/nodemanager/arvnodeman/config.py b/services/nodemanager/arvnodeman/config.py
index f884295..a16e0a8 100644
--- a/services/nodemanager/arvnodeman/config.py
+++ b/services/nodemanager/arvnodeman/config.py
@@ -14,11 +14,15 @@ from apiclient import errors as apierror
from .baseactor import BaseNodeManagerActor
+from libcloud.common.types import LibcloudError
+from libcloud.common.exceptions import BaseHTTPError
+
# IOError is the base class for socket.error, ssl.SSLError, and friends.
# It seems like it hits the sweet spot for operations we want to retry:
# it's low-level, but unlikely to catch code bugs.
NETWORK_ERRORS = (IOError,)
ARVADOS_ERRORS = NETWORK_ERRORS + (apierror.Error,)
+CLOUD_ERRORS = NETWORK_ERRORS + (LibcloudError, BaseHTTPError)
actor_class = BaseNodeManagerActor
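Exception-type tuples such as CLOUD_ERRORS compose with + and can be passed directly to isinstance() and except clauses, which is how the driver code uses them. A self-contained illustration (FakeLibcloudError is a stand-in, not the real libcloud type):

    NETWORK_ERRORS = (IOError,)

    class FakeLibcloudError(Exception):
        """Stand-in for libcloud.common.types.LibcloudError."""

    CLOUD_ERRORS = NETWORK_ERRORS + (FakeLibcloudError,)

    def classify(exc):
        return "retry" if isinstance(exc, CLOUD_ERRORS) else "raise"

    assert classify(IOError("connection reset")) == "retry"
    assert classify(FakeLibcloudError("cloud API error")) == "retry"
    assert classify(ValueError("code bug")) == "raise"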
diff --git a/services/nodemanager/tests/test_computenode_dispatch.py b/services/nodemanager/tests/test_computenode_dispatch.py
index b950cc1..598b293 100644
--- a/services/nodemanager/tests/test_computenode_dispatch.py
+++ b/services/nodemanager/tests/test_computenode_dispatch.py
@@ -28,7 +28,6 @@ class ComputeNodeSetupActorTestCase(testutil.ActorTestMixin, unittest.TestCase):
self.api_client.nodes().update().execute.side_effect = arvados_effect
self.cloud_client = mock.MagicMock(name='cloud_client')
self.cloud_client.create_node.return_value = testutil.cloud_node_mock(1)
- self.cloud_client.is_cloud_exception = BaseComputeNodeDriver.is_cloud_exception
def make_actor(self, arv_node=None):
if not hasattr(self, 'timer'):
@@ -277,7 +276,8 @@ class ComputeNodeUpdateActorTestCase(testutil.ActorTestMixin,
def make_actor(self):
self.driver = mock.MagicMock(name='driver_mock')
- self.updater = self.ACTOR_CLASS.start(self.driver).proxy()
+ self.timer = mock.MagicMock(name='timer_mock')
+ self.updater = self.ACTOR_CLASS.start(self.driver, self.timer).proxy()
def test_node_sync(self, *args):
self.make_actor()
diff --git a/services/nodemanager/tests/test_computenode_dispatch_slurm.py b/services/nodemanager/tests/test_computenode_dispatch_slurm.py
index e1def28..73bcb57 100644
--- a/services/nodemanager/tests/test_computenode_dispatch_slurm.py
+++ b/services/nodemanager/tests/test_computenode_dispatch_slurm.py
@@ -84,7 +84,7 @@ class SLURMComputeNodeShutdownActorTestCase(ComputeNodeShutdownActorMixin,
self.check_success_flag(False, 2)
def test_issue_slurm_drain_retry(self, proc_mock):
- proc_mock.side_effect = iter([OSError, '', OSError, 'drng\n'])
+ proc_mock.side_effect = iter([OSError, '', OSError, 'drng\n', 'drain\n', 'drain\n'])
self.check_success_after_reset(proc_mock)
def test_arvados_node_cleaned_after_shutdown(self, proc_mock):
diff --git a/services/nodemanager/tests/test_computenode_driver_azure.py b/services/nodemanager/tests/test_computenode_driver_azure.py
index 702688d..c4bc680 100644
--- a/services/nodemanager/tests/test_computenode_driver_azure.py
+++ b/services/nodemanager/tests/test_computenode_driver_azure.py
@@ -69,19 +69,6 @@ class AzureComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase
node.extra = {'tags': {"hostname": name}}
self.assertEqual(name, azure.ComputeNodeDriver.node_fqdn(node))
- def test_cloud_exceptions(self):
- for error in [Exception("test exception"),
- IOError("test exception"),
- ssl.SSLError("test exception"),
- cloud_types.LibcloudError("test exception")]:
- self.assertTrue(azure.ComputeNodeDriver.is_cloud_exception(error),
- "{} not flagged as cloud exception".format(error))
-
- def test_noncloud_exceptions(self):
- self.assertFalse(
- azure.ComputeNodeDriver.is_cloud_exception(ValueError("test error")),
- "ValueError flagged as cloud exception")
-
def test_sync_node(self):
arv_node = testutil.arvados_node_mock(1)
cloud_node = testutil.cloud_node_mock(2)
diff --git a/services/nodemanager/tests/test_computenode_driver_ec2.py b/services/nodemanager/tests/test_computenode_driver_ec2.py
index a778cd5..14df360 100644
--- a/services/nodemanager/tests/test_computenode_driver_ec2.py
+++ b/services/nodemanager/tests/test_computenode_driver_ec2.py
@@ -96,16 +96,3 @@ class EC2ComputeNodeDriverTestCase(testutil.DriverTestMixin, unittest.TestCase):
node = testutil.cloud_node_mock()
node.name = name
self.assertEqual(name, ec2.ComputeNodeDriver.node_fqdn(node))
-
- def test_cloud_exceptions(self):
- for error in [Exception("test exception"),
- IOError("test exception"),
- ssl.SSLError("test exception"),
- cloud_types.LibcloudError("test exception")]:
- self.assertTrue(ec2.ComputeNodeDriver.is_cloud_exception(error),
- "{} not flagged as cloud exception".format(error))
-
- def test_noncloud_exceptions(self):
- self.assertFalse(
- ec2.ComputeNodeDriver.is_cloud_exception(ValueError("test error")),
- "ValueError flagged as cloud exception")
commit 35316e68df3b57042edb22fe2ca7f86f060ff5f9
Author: Peter Amstutz <peter.amstutz at curoverse.com>
Date: Tue Jun 6 14:05:44 2017 -0400
11345: Simplify and consolidate retry for API throttling errors.
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curoverse.com>
diff --git a/services/nodemanager/arvnodeman/computenode/__init__.py b/services/nodemanager/arvnodeman/computenode/__init__.py
index 20b274b..7cf9d63 100644
--- a/services/nodemanager/arvnodeman/computenode/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/__init__.py
@@ -8,6 +8,8 @@ import itertools
import re
import time
+from libcloud.common.exceptions import BaseHTTPError
+
ARVADOS_TIMEFMT = '%Y-%m-%dT%H:%M:%SZ'
ARVADOS_TIMESUBSEC_RE = re.compile(r'(\.\d+)Z$')
@@ -73,11 +75,24 @@ class RetryMixin(object):
try:
ret = orig_func(self, *args, **kwargs)
except Exception as error:
- if not (isinstance(error, errors) or
- self._cloud.is_cloud_exception(error)):
+ should_retry = False
+
+ if isinstance(error, BaseHTTPError):
+ if error.headers and error.headers.get("retry-after"):
+ try:
+ self.retry_wait = int(error.headers["retry-after"])
+ should_retry = True
+ except ValueError:
+ pass
+ if error.code == 429 or error.code >= 500:
+ should_retry = True
+ elif isinstance(error, errors):
+ should_retry = True
+
+ if not should_retry:
self.retry_wait = self.min_retry_wait
self._logger.warning(
- "Re-raising unknown error (no retry): %s",
+ "Re-raising error (no retry): %s",
error, exc_info=error)
raise
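The new decision for HTTP errors can be read as a standalone function (a sketch assuming an error object with .code and .headers attributes like libcloud's BaseHTTPError; should_retry_http, default_wait, and FakeHTTPError are illustrative names, not part of the patch):

    def should_retry_http(error, default_wait):
        # Honor a Retry-After header when present, and always retry on
        # HTTP 429 (throttling) or any 5xx server error.
        wait = default_wait
        retry = False
        retry_after = (error.headers or {}).get("retry-after")
        if retry_after is not None:
            try:
                wait = int(retry_after)
                retry = True
            except ValueError:
                pass
        if error.code == 429 or error.code >= 500:
            retry = True
        return retry, wait

    class FakeHTTPError(Exception):
        def __init__(self, code, headers=None):
            self.code = code
            self.headers = headers or {}

    assert should_retry_http(FakeHTTPError(429, {"retry-after": "12"}), 1) == (True, 12)
    assert should_retry_http(FakeHTTPError(404), 1) == (False, 1)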
diff --git a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
index 63dac3f..8a397dc 100644
--- a/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/dispatch/__init__.py
@@ -8,6 +8,8 @@ import time
import re
import libcloud.common.types as cloud_types
+from libcloud.common.exceptions import BaseHTTPError
+
import pykka
from .. import \
@@ -126,7 +128,12 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
try:
self.cloud_node = self._cloud.create_node(self.cloud_size,
self.arvados_node)
- except Exception as e:
+ except BaseHTTPError as e:
+ if e.code == 429 or "RequestLimitExceeded" in e.message:
+ # Don't consider API rate limits to be quota errors.
+ # re-raise so the Retry logic applies.
+ raise
+
# The set of possible error codes / messages isn't documented for
# all clouds, so use a keyword heuristic to determine if the
# failure is likely due to a quota.
@@ -136,7 +143,10 @@ class ComputeNodeSetupActor(ComputeNodeStateChangeBase):
self._finished()
return
else:
+ # Something else happened, re-raise so the Retry logic applies.
raise
+ except Exception as e:
+ raise
# The information included in the node size object we get from libcloud
# is inconsistent between cloud drivers. Replace libcloud NodeSize
@@ -272,7 +282,7 @@ class ComputeNodeShutdownActor(ComputeNodeStateChangeBase):
self._finished(success_flag=True)
-class ComputeNodeUpdateActor(config.actor_class):
+class ComputeNodeUpdateActor(config.actor_class, RetryMixin):
"""Actor to dispatch one-off cloud management requests.
This actor receives requests for small cloud updates, and
@@ -281,12 +291,11 @@ class ComputeNodeUpdateActor(config.actor_class):
dedicated actor for this gives us the opportunity to control the
flow of requests; e.g., by backing off when errors occur.
"""
- def __init__(self, cloud_factory, max_retry_wait=180):
+ def __init__(self, cloud_factory, timer_actor, max_retry_wait=180):
super(ComputeNodeUpdateActor, self).__init__()
+ RetryMixin.__init__(self, 1, max_retry_wait,
+ None, cloud_factory(), timer_actor)
self._cloud = cloud_factory()
- self.max_retry_wait = max_retry_wait
- self.error_streak = 0
- self.next_request_time = time.time()
def _set_logger(self):
self._logger = logging.getLogger("%s.%s" % (self.__class__.__name__, self.actor_urn[33:]))
@@ -294,28 +303,7 @@ class ComputeNodeUpdateActor(config.actor_class):
def on_start(self):
self._set_logger()
- def _throttle_errors(orig_func):
- @functools.wraps(orig_func)
- def throttle_wrapper(self, *args, **kwargs):
- throttle_time = self.next_request_time - time.time()
- if throttle_time > 0:
- time.sleep(throttle_time)
- self.next_request_time = time.time()
- try:
- result = orig_func(self, *args, **kwargs)
- except Exception as error:
- if self._cloud.is_cloud_exception(error):
- self.error_streak += 1
- self.next_request_time += min(2 ** self.error_streak,
- self.max_retry_wait)
- self._logger.warn(
- "Unhandled exception: %s", error, exc_info=error)
- else:
- self.error_streak = 0
- return result
- return throttle_wrapper
-
- @_throttle_errors
+ @RetryMixin._retry()
def sync_node(self, cloud_node, arvados_node):
return self._cloud.sync_node(cloud_node, arvados_node)
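Two things change in this file: create_cloud_node now separates API throttling from quota exhaustion before applying the keyword heuristic, and ComputeNodeUpdateActor drops its private _throttle_errors decorator in favor of RetryMixin._retry. A condensed sketch of the new create-error decision (classify_create_error and QUOTA_KEYWORDS are illustrative stand-ins; the real keyword heuristic lives in the dispatcher):

    QUOTA_KEYWORDS = ("quota", "capacity")  # illustrative stand-in

    def classify_create_error(error):
        code = getattr(error, "code", None)
        message = str(getattr(error, "message", error))
        if code == 429 or "RequestLimitExceeded" in message:
            # API rate limit: re-raise so RetryMixin backs off and retries.
            return "retry"
        if any(word in message.lower() for word in QUOTA_KEYWORDS):
            # Looks like a quota error: stop trying to boot this node.
            return "quota"
        # Anything else is also re-raised so the retry logic applies.
        return "retry"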
diff --git a/services/nodemanager/arvnodeman/computenode/driver/__init__.py b/services/nodemanager/arvnodeman/computenode/driver/__init__.py
index 4420341..6d23c2b 100644
--- a/services/nodemanager/arvnodeman/computenode/driver/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/driver/__init__.py
@@ -25,7 +25,7 @@ class BaseComputeNodeDriver(RetryMixin):
Subclasses must implement arvados_create_kwargs, sync_node,
node_fqdn, and node_start_time.
"""
- CLOUD_ERRORS = NETWORK_ERRORS + (cloud_types.LibcloudError, BaseHTTPError)
+ CLOUD_ERRORS = NETWORK_ERRORS + (cloud_types.LibcloudError,)
@RetryMixin._retry()
def _create_driver(self, driver_class, **auth_kwargs):
@@ -206,14 +206,6 @@ class BaseComputeNodeDriver(RetryMixin):
# seconds since the epoch UTC.
raise NotImplementedError("BaseComputeNodeDriver.node_start_time")
- @classmethod
- def is_cloud_exception(cls, exception):
- # libcloud compute drivers typically raise bare Exceptions to
- # represent API errors. Return True for any exception that is
- # exactly an Exception, or a better-known higher-level exception.
- return (isinstance(exception, cls.CLOUD_ERRORS) or
- type(exception) is Exception)
-
def destroy_node(self, cloud_node):
try:
return self.real.destroy_node(cloud_node)
diff --git a/services/nodemanager/arvnodeman/computenode/driver/azure.py b/services/nodemanager/arvnodeman/computenode/driver/azure.py
index e293d1b..6e7392a 100644
--- a/services/nodemanager/arvnodeman/computenode/driver/azure.py
+++ b/services/nodemanager/arvnodeman/computenode/driver/azure.py
@@ -17,7 +17,7 @@ class ComputeNodeDriver(BaseComputeNodeDriver):
DEFAULT_DRIVER = cloud_provider.get_driver(cloud_types.Provider.AZURE_ARM)
SEARCH_CACHE = {}
- CLOUD_ERRORS = BaseComputeNodeDriver.CLOUD_ERRORS + (BaseHTTPError,)
+ CLOUD_ERRORS = BaseComputeNodeDriver.CLOUD_ERRORS
def __init__(self, auth_kwargs, list_kwargs, create_kwargs,
driver_class=DEFAULT_DRIVER):
diff --git a/services/nodemanager/arvnodeman/launcher.py b/services/nodemanager/arvnodeman/launcher.py
index 3cd097a..72a285b 100644
--- a/services/nodemanager/arvnodeman/launcher.py
+++ b/services/nodemanager/arvnodeman/launcher.py
@@ -128,7 +128,7 @@ def main(args=None):
server_calculator = build_server_calculator(config)
timer, cloud_node_poller, arvados_node_poller, job_queue_poller = \
launch_pollers(config, server_calculator)
- cloud_node_updater = node_update.start(config.new_cloud_client).tell_proxy()
+ cloud_node_updater = node_update.start(config.new_cloud_client, timer).tell_proxy()
node_daemon = NodeManagerDaemonActor.start(
job_queue_poller, arvados_node_poller, cloud_node_poller,
cloud_node_updater, timer,
diff --git a/services/nodemanager/arvnodeman/test/fake_driver.py b/services/nodemanager/arvnodeman/test/fake_driver.py
index 1785e05..8e7cf2f 100644
--- a/services/nodemanager/arvnodeman/test/fake_driver.py
+++ b/services/nodemanager/arvnodeman/test/fake_driver.py
@@ -97,3 +97,31 @@ class FailingDriver(FakeDriver):
ex_tags=None,
ex_network=None):
raise Exception("nope")
+
+class RetryDriver(FakeDriver):
+ def create_node(self, name=None,
+ size=None,
+ image=None,
+ auth=None,
+ ex_storage_account=None,
+ ex_customdata=None,
+ ex_resource_group=None,
+ ex_user_name=None,
+ ex_tags=None,
+ ex_network=None):
+ global create_calls
+ create_calls += 1
+ if create_calls < 2:
+ raise BaseHTTPError(429, "Rate limit exceeded",
+ {'retry-after': '12'})
+ else:
+ return super(RetryDriver, self).create_node(name=name,
+ size=size,
+ image=image,
+ auth=auth,
+ ex_storage_account=ex_storage_account,
+ ex_customdata=ex_customdata,
+ ex_resource_group=ex_resource_group,
+ ex_user_name=ex_user_name,
+ ex_tags=ex_tags,
+ ex_network=ex_network)
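RetryDriver fails the first create_node call with an HTTP 429 that carries a Retry-After header, then behaves like FakeDriver. The error it raises can be constructed and inspected directly (the constructor call mirrors the one above; the assertions are illustrative):

    from libcloud.common.exceptions import BaseHTTPError

    err = BaseHTTPError(429, "Rate limit exceeded", {"retry-after": "12"})
    assert err.code == 429
    assert err.headers.get("retry-after") == "12"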
diff --git a/services/nodemanager/tests/integration_test.py b/services/nodemanager/tests/integration_test.py
index d4b7e02..2cc6282 100755
--- a/services/nodemanager/tests/integration_test.py
+++ b/services/nodemanager/tests/integration_test.py
@@ -319,6 +319,16 @@ def main():
"34t0i-dz642-h42bg3hq4bdfpf2": "ReqNodeNotAvail",
"34t0i-dz642-h42bg3hq4bdfpf3": "ReqNodeNotAvail",
"34t0i-dz642-h42bg3hq4bdfpf4": "ReqNodeNotAvail"
+ }),
+ "test6": (
+ [
+ (r".*Daemon started", set_squeue),
+ (r".*Rate limit exceeded - scheduling retry in 12 seconds", noop),
+ (r".*Cloud node (\S+) is now paired with Arvados node (\S+) with hostname (\S+)", noop),
+ ],
+ {},
+ "arvnodeman.test.fake_driver.RetryDriver",
+ {"34t0i-dz642-h42bg3hq4bdfpf1": "ReqNodeNotAvail"
})
}
-----------------------------------------------------------------------
hooks/post-receive