[ARVADOS] updated: 596d448876dc75b50559cd8590aac7d53f06b25a
git at public.curoverse.com
Tue Nov 4 16:54:18 EST 2014
      via  596d448876dc75b50559cd8590aac7d53f06b25a (commit)
     from  be60ff5cedc313f32b2b9ff153f87d848bff1099 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

Summary of changes:
 .../nodemanager/arvnodeman/computenode/__init__.py | 39 +++++++++-------------
 services/nodemanager/arvnodeman/daemon.py          |  6 ++--
 services/nodemanager/tests/test_computenode.py     | 32 ++++++++++--------
 services/nodemanager/tests/test_daemon.py          | 24 ++++++-------
 4 files changed, 49 insertions(+), 52 deletions(-)
commit 596d448876dc75b50559cd8590aac7d53f06b25a
Author: Brett Smith <brett at curoverse.com>
Date: Tue Nov 4 16:54:10 2014 -0500
4357: Eschew node monitor state() for in_state().
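[Editorial note, not part of the commit: the old state() method collapsed a node's status into the numeric constants UNKNOWN/IDLE/ALLOC; the new in_state() answers a membership question directly from the SLURM state string and returns None when the Arvados record is missing or stale. A minimal standalone sketch of that behavior -- timestamp_fresh, the epoch-seconds 'modified_at' field, and the 600-second staleness window are simplified stand-ins, not the actor's real code:

    import time

    def timestamp_fresh(timestamp, fresh_time):
        # True when the timestamp is less than fresh_time seconds old.
        return (time.time() - timestamp) < fresh_time

    def in_state(arvados_node, node_stale_after, *states):
        # None when the record is missing or stale; otherwise report whether
        # its SLURM state is one of *states.  An 'idle' node only counts as
        # idle when it has no job assigned.
        if arvados_node is None or not timestamp_fresh(
                arvados_node['modified_at'], node_stale_after):
            return None
        state = arvados_node['info'].get('slurm_state')
        result = state in states
        if result and state == 'idle':
            result = not arvados_node['job_uuid']
        return result

    node = {'modified_at': time.time(), 'job_uuid': None,
            'info': {'slurm_state': 'idle'}}
    print(in_state(node, 600, 'idle'))           # True
    print(in_state(node, 600, 'alloc'))          # False
    print(in_state(None, 600, 'idle', 'alloc'))  # None
]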
diff --git a/services/nodemanager/arvnodeman/computenode/__init__.py b/services/nodemanager/arvnodeman/computenode/__init__.py
index 2c1ab9b..1df150c 100644
--- a/services/nodemanager/arvnodeman/computenode/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/__init__.py
@@ -12,11 +12,6 @@ import pykka
from ..clientactor import _notify_subscribers
from .. import config
-# Node states - mostly matching SLURM states
-UNKNOWN = 0
-IDLE = 50
-ALLOC = 100
-
def arvados_node_fqdn(arvados_node, default_hostname='dynamic.compute'):
hostname = arvados_node.get('hostname') or default_hostname
return '{}.{}'.format(hostname, arvados_node['domain'])
@@ -352,29 +347,27 @@ class ComputeNodeMonitorActor(config.actor_class):
self._last_log = msg
self._logger.debug(msg, *args)
- def state(self):
- if ((self.arvados_node is None) or
- (not timestamp_fresh(arvados_node_mtime(self.arvados_node),
- self.poll_stale_after))):
- return UNKNOWN
- elif ((self.arvados_node['info'].get('slurm_state') == 'idle') and
- (not self.arvados_node['job_uuid'])):
- return IDLE
- else:
- return ALLOC
+ def in_state(self, *states):
+ # Return a boolean to say whether or not our Arvados node record is in
+ # one of the given states. If the Arvados node record is unavailable
+ # or stale, return None.
+ if (self.arvados_node is None) or not timestamp_fresh(
+ arvados_node_mtime(self.arvados_node), self.node_stale_after):
+ return None
+ state = self.arvados_node['info'].get('slurm_state')
+ result = state in states
+ if result and state == 'idle':
+ result = not self.arvados_node['job_uuid']
+ return result
def _shutdown_eligible(self):
- state = self.state()
- if state == IDLE:
- return True
- elif state == UNKNOWN:
+ if self.arvados_node is None:
# If this is a new, unpaired node, it's eligible for
# shutdown--we figure there was an error during bootstrap.
- return ((self.arvados_node is None) and
- timestamp_fresh(self.cloud_node_start_time,
- self.node_stale_after))
+ return timestamp_fresh(self.cloud_node_start_time,
+ self.node_stale_after)
else:
- return False
+ return self.in_state('idle')
def consider_shutdown(self):
next_opening = self._shutdowns.next_opening()
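[Editorial note: with the constants gone, _shutdown_eligible() reduces to two questions -- is the node still unpaired (in which case shutdown is allowed only while the cloud record is fresh, on the assumption that bootstrap failed), or is its paired Arvados record idle? A minimal sketch of that decision with stand-in arguments, not code from the commit:

    def shutdown_eligible(arvados_node_present, idle, cloud_node_fresh):
        # 'idle' is the in_state('idle') result: True, False, or None when
        # the Arvados record is stale; 'cloud_node_fresh' stands in for
        # timestamp_fresh(cloud_node_start_time, node_stale_after).
        if not arvados_node_present:
            # Unpaired node: assume a bootstrap error, but only act while
            # the cloud node is still fresh.
            return cloud_node_fresh
        return idle

    # A stale record (None) or a busy node (False) keeps the node running.
    for present, idle, fresh in [(False, None, True), (False, None, False),
                                 (True, True, True), (True, False, True),
                                 (True, None, True)]:
        print(present, idle, fresh, '->',
              shutdown_eligible(present, idle, fresh))
]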
diff --git a/services/nodemanager/arvnodeman/daemon.py b/services/nodemanager/arvnodeman/daemon.py
index 4b1bce3..ba5f354 100644
--- a/services/nodemanager/arvnodeman/daemon.py
+++ b/services/nodemanager/arvnodeman/daemon.py
@@ -193,10 +193,10 @@ class NodeManagerDaemonActor(actor_class):
return up - len(self.shutdowns)
def _nodes_busy(self):
- return sum(1 for state in
- pykka.get_all(rec.actor.state() for rec in
+ return sum(1 for alloc in
+ pykka.get_all(rec.actor.in_state('alloc') for rec in
self.cloud_nodes.nodes.itervalues())
- if state == cnode.ALLOC)
+ if alloc)
def _nodes_wanted(self):
return min(len(self.last_wishlist) + self._nodes_busy(),
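[Editorial note: the daemon's _nodes_busy() now counts monitors whose in_state('alloc') future resolves truthy, instead of comparing results against the removed ALLOC constant. A runnable illustration of that shape -- FakeMonitor is a made-up stand-in for ComputeNodeMonitorActor, not node manager code:

    import pykka

    class FakeMonitor(pykka.ThreadingActor):
        # Answers in_state() from a canned SLURM state string.
        def __init__(self, slurm_state):
            super(FakeMonitor, self).__init__()
            self.slurm_state = slurm_state

        def in_state(self, *states):
            return self.slurm_state in states

    proxies = [FakeMonitor.start(s).proxy() for s in ('alloc', 'idle', 'alloc')]
    # Gather each monitor's in_state('alloc') future and count truthy answers.
    busy = sum(1 for alloc in
               pykka.get_all(p.in_state('alloc') for p in proxies)
               if alloc)
    print(busy)  # 2
    pykka.ActorRegistry.stop_all()
]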
diff --git a/services/nodemanager/tests/test_computenode.py b/services/nodemanager/tests/test_computenode.py
index 903ff82..921a6c1 100644
--- a/services/nodemanager/tests/test_computenode.py
+++ b/services/nodemanager/tests/test_computenode.py
@@ -188,25 +188,29 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
self.updates, arv_node).proxy()
self.node_actor.subscribe(self.subscriber).get(self.TIMEOUT)
- def node_state(self):
- return self.node_actor.state().get(self.TIMEOUT)
+ def node_state(self, *states):
+ return self.node_actor.in_state(*states).get(self.TIMEOUT)
- def test_state_unknown_without_pairing(self):
+ def test_in_state_when_unpaired(self):
self.make_actor()
- self.assertEqual(cnode.UNKNOWN, self.node_state())
+ self.assertIsNone(self.node_state('idle', 'alloc'))
- def test_idle_state(self):
+ def test_in_state_when_pairing_stale(self):
+ self.make_actor(arv_node=testutil.arvados_node_mock(
+ job_uuid=None, age=90000))
+ self.assertIsNone(self.node_state('idle', 'alloc'))
+
+ def test_in_idle_state(self):
self.make_actor(2, arv_node=testutil.arvados_node_mock(job_uuid=None))
- self.assertEqual(cnode.IDLE, self.node_state())
+ self.assertTrue(self.node_state('idle'))
+ self.assertFalse(self.node_state('alloc'))
+ self.assertTrue(self.node_state('idle', 'alloc'))
- def test_alloc_state(self):
+ def test_in_alloc_state(self):
self.make_actor(3, arv_node=testutil.arvados_node_mock(job_uuid=True))
- self.assertEqual(cnode.ALLOC, self.node_state())
-
- def test_state_unknown_with_stale_pairing(self):
- self.make_actor(4, arv_node=testutil.arvados_node_mock(
- job_uuid=True, age=90000))
- self.assertEqual(cnode.UNKNOWN, self.node_state())
+ self.assertFalse(self.node_state('idle'))
+ self.assertTrue(self.node_state('alloc'))
+ self.assertTrue(self.node_state('idle', 'alloc'))
def test_init_shutdown_scheduling(self):
self.make_actor()
@@ -257,7 +261,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
self.check_shutdown_rescheduled(True, 600)
def test_no_shutdown_when_node_state_stale(self):
- self.make_actor(6, testutil.arvados_node_mock(6, age=900))
+ self.make_actor(6, testutil.arvados_node_mock(6, age=90000))
self.check_shutdown_rescheduled(True, 600)
def test_arvados_node_match(self):
diff --git a/services/nodemanager/tests/test_daemon.py b/services/nodemanager/tests/test_daemon.py
index 5c8a0ae..93e4435 100644
--- a/services/nodemanager/tests/test_daemon.py
+++ b/services/nodemanager/tests/test_daemon.py
@@ -41,7 +41,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
def monitor_list(self):
return pykka.ActorRegistry.get_by_class(nmcnode.ComputeNodeMonitorActor)
- def monitor_count(self):
+ def alive_monitor_count(self):
return sum(1 for actor in self.monitor_list() if actor.is_alive())
def test_easy_node_creation(self):
@@ -55,7 +55,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
arv_node = testutil.arvados_node_mock(1)
self.make_daemon([cloud_node], [arv_node])
self.stop_proxy(self.daemon)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
self.assertIs(
self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
arv_node)
@@ -67,7 +67,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
arv_node = testutil.arvados_node_mock(2)
self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
self.assertIs(
self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
arv_node)
@@ -148,14 +148,14 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
setup = self.start_node_boot(cloud_node)
self.daemon.update_cloud_nodes([cloud_node])
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
def test_no_duplication_when_booted_node_listed(self):
cloud_node = testutil.cloud_node_mock(2)
setup = self.start_node_boot(cloud_node, id_num=2)
self.daemon.node_up(setup)
self.daemon.update_cloud_nodes([cloud_node]).get(self.TIMEOUT)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
def test_node_counted_after_boot_with_slow_listing(self):
# Test that, after we boot a compute node, we assume it exists
@@ -163,9 +163,9 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
# propagating tags).
setup = self.start_node_boot()
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
def test_booted_unlisted_node_counted(self):
setup = self.start_node_boot(id_num=1)
@@ -179,7 +179,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
def test_booted_node_can_shutdown(self):
setup = self.start_node_boot()
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
monitor = self.monitor_list()[0].proxy()
self.daemon.update_server_wishlist([])
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
@@ -191,7 +191,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
cloud_node = testutil.cloud_node_mock(6)
setup = self.start_node_boot(cloud_node, id_num=6)
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
monitor = self.monitor_list()[0].proxy()
self.daemon.update_server_wishlist([])
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
@@ -221,7 +221,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
cloud_node = testutil.cloud_node_mock(1)
size = testutil.MockSize(1)
self.make_daemon(cloud_nodes=[cloud_node], want_sizes=[size])
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
monitor = self.monitor_list()[0].proxy()
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
@@ -229,7 +229,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
def test_shutdown_accepted_below_capacity(self):
self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
- self.assertEqual(1, self.monitor_count())
+ self.assertEqual(1, self.alive_monitor_count())
monitor = self.monitor_list()[0].proxy()
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
@@ -240,7 +240,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
arv_nodes = [testutil.arvados_node_mock(3, job_uuid=True),
testutil.arvados_node_mock(4, job_uuid=None)]
self.make_daemon(cloud_nodes, arv_nodes, [testutil.MockSize(1)])
- self.assertEqual(2, self.monitor_count())
+ self.assertEqual(2, self.alive_monitor_count())
for mon_ref in self.monitor_list():
monitor = mon_ref.proxy()
if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
-----------------------------------------------------------------------
hooks/post-receive
--