[ARVADOS] updated: 8ad92bb9e7950e0bf758716b40764d26ee33802c
git at public.curoverse.com
git at public.curoverse.com
Wed Nov 5 14:29:41 EST 2014
Summary of changes:
.../nodemanager/arvnodeman/computenode/__init__.py | 22 +++-
services/nodemanager/arvnodeman/daemon.py | 15 ++-
services/nodemanager/tests/test_computenode.py | 30 +++++-
services/nodemanager/tests/test_daemon.py | 119 +++++++++++++--------
services/nodemanager/tests/testutil.py | 2 +-
5 files changed, 135 insertions(+), 53 deletions(-)
via 8ad92bb9e7950e0bf758716b40764d26ee33802c (commit)
via 2c693ee50677969a3aa81fe1ba8fd0c702acc20c (commit)
from f91dff0f2f49a83047e31b1a56dbddd85be6926a (commit)
Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.
commit 8ad92bb9e7950e0bf758716b40764d26ee33802c
Merge: f91dff0 2c693ee
Author: Brett Smith <brett at curoverse.com>
Date: Wed Nov 5 14:29:14 2014 -0500
Merge branch '4357-node-manager-busy-nodes-wip'
Closes #4357, #4370.
commit 2c693ee50677969a3aa81fe1ba8fd0c702acc20c
Author: Brett Smith <brett at curoverse.com>
Date: Fri Oct 31 13:59:59 2014 -0400
4357: Node Manager boots new nodes when up nodes are busy.
The key part of this commit is the change to _nodes_wanted.
Everything else provides the information to make that calculation.
This changes the daemon test to use real monitor actors, rather than a
mock. This hurts test isolation a bit, but it's really tricky to mock
individual monitors otherwise. Because monitors only communicate with
the daemon itself, this seems like a worthwhile trade-off; it doesn't
introduce external dependencies or noticeably increase test time.
diff --git a/services/nodemanager/arvnodeman/computenode/__init__.py b/services/nodemanager/arvnodeman/computenode/__init__.py
index 0d4ee7b..63effe9 100644
--- a/services/nodemanager/arvnodeman/computenode/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/__init__.py
@@ -347,14 +347,30 @@ class ComputeNodeMonitorActor(config.actor_class):
self._last_log = msg
self._logger.debug(msg, *args)
+ def in_state(self, *states):
+ # Return a boolean to say whether or not our Arvados node record is in
+ # one of the given states. If state information is not
+ # available--because this node has no Arvados record, the record is
+ # stale, or the record has no state information--return None.
+ if (self.arvados_node is None) or not timestamp_fresh(
+ arvados_node_mtime(self.arvados_node), self.node_stale_after):
+ return None
+ state = self.arvados_node['info'].get('slurm_state')
+ if not state:
+ return None
+ result = state in states
+ if state == 'idle':
+ result = result and not self.arvados_node['job_uuid']
+ return result
+
def _shutdown_eligible(self):
if self.arvados_node is None:
+ # If this is a new, unpaired node, it's eligible for
+ # shutdown--we figure there was an error during bootstrap.
return timestamp_fresh(self.cloud_node_start_time,
self.node_stale_after)
else:
- return (timestamp_fresh(arvados_node_mtime(self.arvados_node),
- self.poll_stale_after) and
- (self.arvados_node['info'].get('slurm_state') == 'idle'))
+ return self.in_state('idle')
def consider_shutdown(self):
next_opening = self._shutdowns.next_opening()
diff --git a/services/nodemanager/arvnodeman/daemon.py b/services/nodemanager/arvnodeman/daemon.py
index 6ea3cdf..83e3ec9 100644
--- a/services/nodemanager/arvnodeman/daemon.py
+++ b/services/nodemanager/arvnodeman/daemon.py
@@ -187,20 +187,27 @@ class NodeManagerDaemonActor(actor_class):
self._pair_nodes(cloud_rec, arv_node)
break
- def _node_count(self):
+ def _nodes_up(self):
up = sum(len(nodelist) for nodelist in
[self.cloud_nodes, self.booted, self.booting])
return up - len(self.shutdowns)
+ def _nodes_busy(self):
+ return sum(1 for idle in
+ pykka.get_all(rec.actor.in_state('idle') for rec in
+ self.cloud_nodes.nodes.itervalues())
+ if idle is False)
+
def _nodes_wanted(self):
- return len(self.last_wishlist) - self._node_count()
+ return min(len(self.last_wishlist) + self._nodes_busy(),
+ self.max_nodes) - self._nodes_up()
def _nodes_excess(self):
- return -self._nodes_wanted()
+ return self._nodes_up() - self._nodes_busy() - len(self.last_wishlist)
def update_server_wishlist(self, wishlist):
self._update_poll_time('server_wishlist')
- self.last_wishlist = wishlist[:self.max_nodes]
+ self.last_wishlist = wishlist
nodes_wanted = self._nodes_wanted()
if nodes_wanted > 0:
self._later.start_node()
diff --git a/services/nodemanager/tests/test_computenode.py b/services/nodemanager/tests/test_computenode.py
index 05022f0..5ced5f9 100644
--- a/services/nodemanager/tests/test_computenode.py
+++ b/services/nodemanager/tests/test_computenode.py
@@ -188,6 +188,34 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
self.updates, arv_node).proxy()
self.node_actor.subscribe(self.subscriber).get(self.TIMEOUT)
+ def node_state(self, *states):
+ return self.node_actor.in_state(*states).get(self.TIMEOUT)
+
+ def test_in_state_when_unpaired(self):
+ self.make_actor()
+ self.assertIsNone(self.node_state('idle', 'alloc'))
+
+ def test_in_state_when_pairing_stale(self):
+ self.make_actor(arv_node=testutil.arvados_node_mock(
+ job_uuid=None, age=90000))
+ self.assertIsNone(self.node_state('idle', 'alloc'))
+
+ def test_in_state_when_no_state_available(self):
+ self.make_actor(arv_node=testutil.arvados_node_mock(info={}))
+ self.assertIsNone(self.node_state('idle', 'alloc'))
+
+ def test_in_idle_state(self):
+ self.make_actor(2, arv_node=testutil.arvados_node_mock(job_uuid=None))
+ self.assertTrue(self.node_state('idle'))
+ self.assertFalse(self.node_state('alloc'))
+ self.assertTrue(self.node_state('idle', 'alloc'))
+
+ def test_in_alloc_state(self):
+ self.make_actor(3, arv_node=testutil.arvados_node_mock(job_uuid=True))
+ self.assertFalse(self.node_state('idle'))
+ self.assertTrue(self.node_state('alloc'))
+ self.assertTrue(self.node_state('idle', 'alloc'))
+
def test_init_shutdown_scheduling(self):
self.make_actor()
self.assertTrue(self.timer.schedule.called)
@@ -237,7 +265,7 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
self.check_shutdown_rescheduled(True, 600)
def test_no_shutdown_when_node_state_stale(self):
- self.make_actor(6, testutil.arvados_node_mock(6, age=900))
+ self.make_actor(6, testutil.arvados_node_mock(6, age=90000))
self.check_shutdown_rescheduled(True, 600)
def test_arvados_node_match(self):
diff --git a/services/nodemanager/tests/test_daemon.py b/services/nodemanager/tests/test_daemon.py
index 869ae4a..93e4435 100644
--- a/services/nodemanager/tests/test_daemon.py
+++ b/services/nodemanager/tests/test_daemon.py
@@ -6,13 +6,16 @@ import time
import unittest
import mock
+import pykka
+import arvnodeman.computenode as nmcnode
import arvnodeman.daemon as nmdaemon
from . import testutil
class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
unittest.TestCase):
- def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[]):
+ def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[],
+ max_nodes=8):
for name in ['cloud_nodes', 'arvados_nodes', 'server_wishlist']:
setattr(self, name + '_poller', mock.MagicMock(name=name + '_mock'))
self.arv_factory = mock.MagicMock(name='arvados_mock')
@@ -20,15 +23,14 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.cloud_factory().node_start_time.return_value = time.time()
self.cloud_updates = mock.MagicMock(name='updates_mock')
self.timer = testutil.MockTimer()
- self.node_factory = mock.MagicMock(name='factory_mock')
self.node_setup = mock.MagicMock(name='setup_mock')
self.node_shutdown = mock.MagicMock(name='shutdown_mock')
self.daemon = nmdaemon.NodeManagerDaemonActor.start(
self.server_wishlist_poller, self.arvados_nodes_poller,
self.cloud_nodes_poller, self.cloud_updates, self.timer,
self.arv_factory, self.cloud_factory,
- [54, 5, 1], 8, 600, 3600,
- self.node_setup, self.node_shutdown, self.node_factory).proxy()
+ [54, 5, 1], max_nodes, 600, 3600,
+ self.node_setup, self.node_shutdown).proxy()
if cloud_nodes is not None:
self.daemon.update_cloud_nodes(cloud_nodes).get(self.TIMEOUT)
if arvados_nodes is not None:
@@ -36,6 +38,12 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
if want_sizes is not None:
self.daemon.update_server_wishlist(want_sizes).get(self.TIMEOUT)
+ def monitor_list(self):
+ return pykka.ActorRegistry.get_by_class(nmcnode.ComputeNodeMonitorActor)
+
+ def alive_monitor_count(self):
+ return sum(1 for actor in self.monitor_list() if actor.is_alive())
+
def test_easy_node_creation(self):
size = testutil.MockSize(1)
self.make_daemon(want_sizes=[size])
@@ -47,23 +55,22 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
arv_node = testutil.arvados_node_mock(1)
self.make_daemon([cloud_node], [arv_node])
self.stop_proxy(self.daemon)
- self.node_factory.start().proxy().offer_arvados_pair.assert_called_with(
+ self.assertEqual(1, self.alive_monitor_count())
+ self.assertIs(
+ self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
arv_node)
def test_node_pairing_after_arvados_update(self):
cloud_node = testutil.cloud_node_mock(2)
- arv_node = testutil.arvados_node_mock(2, ip_address=None)
- self.make_daemon([cloud_node], None)
- pair_func = self.node_factory.start().proxy().offer_arvados_pair
- pair_func().get.return_value = None
- self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
- pair_func.assert_called_with(arv_node)
-
- pair_func().get.return_value = cloud_node.id
- pair_func.reset_mock()
+ self.make_daemon([cloud_node],
+ [testutil.arvados_node_mock(2, ip_address=None)])
arv_node = testutil.arvados_node_mock(2)
self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
- pair_func.assert_called_with(arv_node)
+ self.stop_proxy(self.daemon)
+ self.assertEqual(1, self.alive_monitor_count())
+ self.assertIs(
+ self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
+ arv_node)
def test_old_arvados_node_not_double_assigned(self):
arv_node = testutil.arvados_node_mock(3, age=9000)
@@ -100,9 +107,23 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.stop_proxy(self.daemon)
self.assertEqual(1, self.node_setup.start.call_count)
+ def test_boot_new_node_when_all_nodes_busy(self):
+ arv_node = testutil.arvados_node_mock(2, job_uuid=True)
+ self.make_daemon([testutil.cloud_node_mock(2)], [arv_node],
+ [testutil.MockSize(2)])
+ self.stop_proxy(self.daemon)
+ self.assertTrue(self.node_setup.start.called)
+
+ def test_no_new_node_when_max_nodes_busy(self):
+ self.make_daemon([testutil.cloud_node_mock(3)],
+ [testutil.arvados_node_mock(3, job_uuid=True)],
+ [testutil.MockSize(3)],
+ max_nodes=1)
+ self.stop_proxy(self.daemon)
+ self.assertFalse(self.node_setup.start.called)
+
def mock_setup_actor(self, cloud_node, arv_node):
- setup = mock.MagicMock(name='setup_node_mock')
- setup.actor_ref = self.node_setup.start().proxy().actor_ref
+ setup = self.node_setup.start().proxy()
self.node_setup.reset_mock()
setup.actor_urn = cloud_node.id
setup.cloud_node.get.return_value = cloud_node
@@ -125,17 +146,16 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
# get the "node up" message from CloudNodeSetupActor.
cloud_node = testutil.cloud_node_mock(1)
setup = self.start_node_boot(cloud_node)
- self.daemon.update_cloud_nodes([cloud_node]).get(self.TIMEOUT)
- self.assertTrue(self.node_factory.start.called)
+ self.daemon.update_cloud_nodes([cloud_node])
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertEqual(1, self.node_factory.start.call_count)
+ self.assertEqual(1, self.alive_monitor_count())
def test_no_duplication_when_booted_node_listed(self):
cloud_node = testutil.cloud_node_mock(2)
setup = self.start_node_boot(cloud_node, id_num=2)
self.daemon.node_up(setup)
self.daemon.update_cloud_nodes([cloud_node]).get(self.TIMEOUT)
- self.assertEqual(1, self.node_factory.start.call_count)
+ self.assertEqual(1, self.alive_monitor_count())
def test_node_counted_after_boot_with_slow_listing(self):
# Test that, after we boot a compute node, we assume it exists
@@ -143,14 +163,9 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
# propagating tags).
setup = self.start_node_boot()
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertTrue(self.node_factory.start.called,
- "daemon not monitoring booted node")
- self.daemon.update_cloud_nodes([])
- self.stop_proxy(self.daemon)
- self.assertEqual(1, self.node_factory.start.call_count,
- "daemon has duplicate monitors for booted node")
- self.assertFalse(self.node_factory.start().proxy().stop.called,
- "daemon prematurely stopped monitoring a new node")
+ self.assertEqual(1, self.alive_monitor_count())
+ self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+ self.assertEqual(1, self.alive_monitor_count())
def test_booted_unlisted_node_counted(self):
setup = self.start_node_boot(id_num=1)
@@ -163,10 +178,11 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
def test_booted_node_can_shutdown(self):
setup = self.start_node_boot()
- self.daemon.node_up(setup)
+ self.daemon.node_up(setup).get(self.TIMEOUT)
+ self.assertEqual(1, self.alive_monitor_count())
+ monitor = self.monitor_list()[0].proxy()
self.daemon.update_server_wishlist([])
- self.daemon.node_can_shutdown(
- self.node_factory.start().proxy()).get(self.TIMEOUT)
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
self.assertTrue(self.node_shutdown.start.called,
"daemon did not shut down booted node on offer")
@@ -174,9 +190,9 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
def test_booted_node_lifecycle(self):
cloud_node = testutil.cloud_node_mock(6)
setup = self.start_node_boot(cloud_node, id_num=6)
- monitor = self.node_factory.start().proxy()
- monitor.cloud_node.get.return_value = cloud_node
- self.daemon.node_up(setup)
+ self.daemon.node_up(setup).get(self.TIMEOUT)
+ self.assertEqual(1, self.alive_monitor_count())
+ monitor = self.monitor_list()[0].proxy()
self.daemon.update_server_wishlist([])
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.assertTrue(self.node_shutdown.start.called,
@@ -186,7 +202,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.daemon.node_finished_shutdown(shutdown).get(self.TIMEOUT)
self.assertTrue(shutdown.stop.called,
"shutdown actor not stopped after finishing")
- self.assertTrue(monitor.stop.called,
+ self.assertTrue(monitor.actor_ref.actor_stopped.wait(self.TIMEOUT),
"monitor for booted node not stopped after shutdown")
self.daemon.update_server_wishlist(
[testutil.MockSize(2)]).get(self.TIMEOUT)
@@ -205,23 +221,38 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
cloud_node = testutil.cloud_node_mock(1)
size = testutil.MockSize(1)
self.make_daemon(cloud_nodes=[cloud_node], want_sizes=[size])
- self.daemon.node_can_shutdown(
- self.node_factory.start().proxy()).get(self.TIMEOUT)
+ self.assertEqual(1, self.alive_monitor_count())
+ monitor = self.monitor_list()[0].proxy()
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
self.assertFalse(self.node_shutdown.start.called)
def test_shutdown_accepted_below_capacity(self):
self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
- node_actor = self.node_factory().proxy()
- self.daemon.node_can_shutdown(node_actor).get(self.TIMEOUT)
+ self.assertEqual(1, self.alive_monitor_count())
+ monitor = self.monitor_list()[0].proxy()
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
self.assertTrue(self.node_shutdown.start.called)
+ def test_shutdown_declined_when_idle_and_job_queued(self):
+ cloud_nodes = [testutil.cloud_node_mock(n) for n in [3, 4]]
+ arv_nodes = [testutil.arvados_node_mock(3, job_uuid=True),
+ testutil.arvados_node_mock(4, job_uuid=None)]
+ self.make_daemon(cloud_nodes, arv_nodes, [testutil.MockSize(1)])
+ self.assertEqual(2, self.alive_monitor_count())
+ for mon_ref in self.monitor_list():
+ monitor = mon_ref.proxy()
+ if monitor.cloud_node.get(self.TIMEOUT) is cloud_nodes[-1]:
+ break
+ else:
+ self.fail("monitor for idle node not found")
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
+ self.stop_proxy(self.daemon)
+ self.assertFalse(self.node_shutdown.start.called)
+
def test_clean_shutdown_waits_for_node_setup_finish(self):
- self.make_daemon(want_sizes=[testutil.MockSize(1)])
- self.daemon.max_nodes.get(self.TIMEOUT)
- self.assertTrue(self.node_setup.start.called)
- new_node = self.node_setup.start().proxy()
+ new_node = self.start_node_boot()
self.daemon.shutdown().get(self.TIMEOUT)
self.assertTrue(new_node.stop_if_no_cloud_node.called)
self.daemon.node_up(new_node).get(self.TIMEOUT)
diff --git a/services/nodemanager/tests/testutil.py b/services/nodemanager/tests/testutil.py
index 0c63db3..a1b0658 100644
--- a/services/nodemanager/tests/testutil.py
+++ b/services/nodemanager/tests/testutil.py
@@ -15,7 +15,7 @@ def arvados_node_mock(node_num=99, job_uuid=None, age=0, **kwargs):
if job_uuid is True:
job_uuid = 'zzzzz-jjjjj-jobjobjobjobjob'
slurm_state = 'idle' if (job_uuid is None) else 'alloc'
- node = {'uuid': 'zzzzz-yyyyy-12345abcde67890',
+ node = {'uuid': 'zzzzz-yyyyy-{:015x}'.format(node_num),
'created_at': '2014-01-01T01:02:03Z',
'modified_at': time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime(time.time() - age)),
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits
mailing list