[ARVADOS] created: b0e6de37cd54cabecdca3c8338410d7f8e7b8ae7
git at public.curoverse.com
git at public.curoverse.com
Fri Oct 31 14:00:12 EDT 2014
at b0e6de37cd54cabecdca3c8338410d7f8e7b8ae7 (commit)
commit b0e6de37cd54cabecdca3c8338410d7f8e7b8ae7
Author: Brett Smith <brett at curoverse.com>
Date: Fri Oct 31 13:59:59 2014 -0400
4357: Node Manager boots new nodes when up nodes are busy.
The key part of this commit is the change to _nodes_wanted.
Everything else provides the information needed to make that calculation.
This changes the daemon test to use real monitor actors, rather than a
mock. This hurts test isolation a bit, but it's really tricky to mock
individual monitors otherwise. Because monitors only communicate with
the daemon itself, this seems like a worthwhile trade-off; it doesn't
introduce external dependencies or noticeably increase test time.
diff --git a/services/nodemanager/arvnodeman/computenode/__init__.py b/services/nodemanager/arvnodeman/computenode/__init__.py
index 0d4ee7b..6e156cb 100644
--- a/services/nodemanager/arvnodeman/computenode/__init__.py
+++ b/services/nodemanager/arvnodeman/computenode/__init__.py
@@ -12,6 +12,11 @@ import pykka
from ..clientactor import _notify_subscribers
from .. import config
+# Node states - mostly matching SLURM states
+UNKNOWN = 0
+IDLE = 50
+ALLOC = 100
+
def arvados_node_fqdn(arvados_node, default_hostname='dynamic.compute'):
hostname = arvados_node.get('hostname') or default_hostname
return '{}.{}'.format(hostname, arvados_node['domain'])
@@ -347,14 +352,27 @@ class ComputeNodeMonitorActor(config.actor_class):
self._last_log = msg
self._logger.debug(msg, *args)
+ def state(self):
+ if ((self.arvados_node is None) or
+ (not timestamp_fresh(arvados_node_mtime(self.arvados_node),
+ self.poll_stale_after))):
+ return UNKNOWN
+ elif ((self.arvados_node['info'].get('slurm_state') == 'idle') and
+ (not self.arvados_node['job_uuid'])):
+ return IDLE
+ else:
+ return ALLOC
+
def _shutdown_eligible(self):
- if self.arvados_node is None:
- return timestamp_fresh(self.cloud_node_start_time,
- self.node_stale_after)
+ state = self.state()
+ if state == IDLE:
+ return True
+ elif state == UNKNOWN:
+ return ((self.arvados_node is None) and
+ timestamp_fresh(self.cloud_node_start_time,
+ self.node_stale_after))
else:
- return (timestamp_fresh(arvados_node_mtime(self.arvados_node),
- self.poll_stale_after) and
- (self.arvados_node['info'].get('slurm_state') == 'idle'))
+ return False
def consider_shutdown(self):
next_opening = self._shutdowns.next_opening()
diff --git a/services/nodemanager/arvnodeman/daemon.py b/services/nodemanager/arvnodeman/daemon.py
index 6ea3cdf..3f42674 100644
--- a/services/nodemanager/arvnodeman/daemon.py
+++ b/services/nodemanager/arvnodeman/daemon.py
@@ -192,15 +192,22 @@ class NodeManagerDaemonActor(actor_class):
[self.cloud_nodes, self.booted, self.booting])
return up - len(self.shutdowns)
+ def _nodes_busy(self):
+ return sum(1 for state in
+ pykka.get_all(rec.actor.state() for rec in
+ self.cloud_nodes.nodes.itervalues())
+ if state == cnode.ALLOC)
+
def _nodes_wanted(self):
- return len(self.last_wishlist) - self._node_count()
+ return min(len(self.last_wishlist) + self._nodes_busy(),
+ self.max_nodes) - self._node_count()
def _nodes_excess(self):
- return -self._nodes_wanted()
+ return self._node_count() - len(self.last_wishlist)
def update_server_wishlist(self, wishlist):
self._update_poll_time('server_wishlist')
- self.last_wishlist = wishlist[:self.max_nodes]
+ self.last_wishlist = wishlist
nodes_wanted = self._nodes_wanted()
if nodes_wanted > 0:
self._later.start_node()
diff --git a/services/nodemanager/tests/test_computenode.py b/services/nodemanager/tests/test_computenode.py
index 05022f0..903ff82 100644
--- a/services/nodemanager/tests/test_computenode.py
+++ b/services/nodemanager/tests/test_computenode.py
@@ -188,6 +188,26 @@ class ComputeNodeMonitorActorTestCase(testutil.ActorTestMixin,
self.updates, arv_node).proxy()
self.node_actor.subscribe(self.subscriber).get(self.TIMEOUT)
+ def node_state(self):
+ return self.node_actor.state().get(self.TIMEOUT)
+
+ def test_state_unknown_without_pairing(self):
+ self.make_actor()
+ self.assertEqual(cnode.UNKNOWN, self.node_state())
+
+ def test_idle_state(self):
+ self.make_actor(2, arv_node=testutil.arvados_node_mock(job_uuid=None))
+ self.assertEqual(cnode.IDLE, self.node_state())
+
+ def test_alloc_state(self):
+ self.make_actor(3, arv_node=testutil.arvados_node_mock(job_uuid=True))
+ self.assertEqual(cnode.ALLOC, self.node_state())
+
+ def test_state_unknown_with_stale_pairing(self):
+ self.make_actor(4, arv_node=testutil.arvados_node_mock(
+ job_uuid=True, age=90000))
+ self.assertEqual(cnode.UNKNOWN, self.node_state())
+
def test_init_shutdown_scheduling(self):
self.make_actor()
self.assertTrue(self.timer.schedule.called)
diff --git a/services/nodemanager/tests/test_daemon.py b/services/nodemanager/tests/test_daemon.py
index 869ae4a..00955c9 100644
--- a/services/nodemanager/tests/test_daemon.py
+++ b/services/nodemanager/tests/test_daemon.py
@@ -6,13 +6,16 @@ import time
import unittest
import mock
+import pykka
+import arvnodeman.computenode as nmcnode
import arvnodeman.daemon as nmdaemon
from . import testutil
class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
unittest.TestCase):
- def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[]):
+ def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[],
+ max_nodes=8):
for name in ['cloud_nodes', 'arvados_nodes', 'server_wishlist']:
setattr(self, name + '_poller', mock.MagicMock(name=name + '_mock'))
self.arv_factory = mock.MagicMock(name='arvados_mock')
@@ -20,15 +23,14 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.cloud_factory().node_start_time.return_value = time.time()
self.cloud_updates = mock.MagicMock(name='updates_mock')
self.timer = testutil.MockTimer()
- self.node_factory = mock.MagicMock(name='factory_mock')
self.node_setup = mock.MagicMock(name='setup_mock')
self.node_shutdown = mock.MagicMock(name='shutdown_mock')
self.daemon = nmdaemon.NodeManagerDaemonActor.start(
self.server_wishlist_poller, self.arvados_nodes_poller,
self.cloud_nodes_poller, self.cloud_updates, self.timer,
self.arv_factory, self.cloud_factory,
- [54, 5, 1], 8, 600, 3600,
- self.node_setup, self.node_shutdown, self.node_factory).proxy()
+ [54, 5, 1], max_nodes, 600, 3600,
+ self.node_setup, self.node_shutdown).proxy()
if cloud_nodes is not None:
self.daemon.update_cloud_nodes(cloud_nodes).get(self.TIMEOUT)
if arvados_nodes is not None:
@@ -36,6 +38,12 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
if want_sizes is not None:
self.daemon.update_server_wishlist(want_sizes).get(self.TIMEOUT)
+ def monitor_list(self):
+ return pykka.ActorRegistry.get_by_class(nmcnode.ComputeNodeMonitorActor)
+
+ def monitor_count(self):
+ return sum(1 for actor in self.monitor_list() if actor.is_alive())
+
def test_easy_node_creation(self):
size = testutil.MockSize(1)
self.make_daemon(want_sizes=[size])
@@ -47,23 +55,22 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
arv_node = testutil.arvados_node_mock(1)
self.make_daemon([cloud_node], [arv_node])
self.stop_proxy(self.daemon)
- self.node_factory.start().proxy().offer_arvados_pair.assert_called_with(
+ self.assertEqual(1, self.monitor_count())
+ self.assertIs(
+ self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
arv_node)
def test_node_pairing_after_arvados_update(self):
cloud_node = testutil.cloud_node_mock(2)
- arv_node = testutil.arvados_node_mock(2, ip_address=None)
- self.make_daemon([cloud_node], None)
- pair_func = self.node_factory.start().proxy().offer_arvados_pair
- pair_func().get.return_value = None
- self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
- pair_func.assert_called_with(arv_node)
-
- pair_func().get.return_value = cloud_node.id
- pair_func.reset_mock()
+ self.make_daemon([cloud_node],
+ [testutil.arvados_node_mock(2, ip_address=None)])
arv_node = testutil.arvados_node_mock(2)
self.daemon.update_arvados_nodes([arv_node]).get(self.TIMEOUT)
- pair_func.assert_called_with(arv_node)
+ self.stop_proxy(self.daemon)
+ self.assertEqual(1, self.monitor_count())
+ self.assertIs(
+ self.monitor_list()[0].proxy().arvados_node.get(self.TIMEOUT),
+ arv_node)
def test_old_arvados_node_not_double_assigned(self):
arv_node = testutil.arvados_node_mock(3, age=9000)
@@ -100,9 +107,23 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.stop_proxy(self.daemon)
self.assertEqual(1, self.node_setup.start.call_count)
+ def test_boot_new_node_when_all_nodes_busy(self):
+ arv_node = testutil.arvados_node_mock(2, job_uuid=True)
+ self.make_daemon([testutil.cloud_node_mock(2)], [arv_node],
+ [testutil.MockSize(2)])
+ self.stop_proxy(self.daemon)
+ self.assertTrue(self.node_setup.start.called)
+
+ def test_no_new_node_when_max_nodes_busy(self):
+ self.make_daemon([testutil.cloud_node_mock(3)],
+ [testutil.arvados_node_mock(3, job_uuid=True)],
+ [testutil.MockSize(3)],
+ max_nodes=1)
+ self.stop_proxy(self.daemon)
+ self.assertFalse(self.node_setup.start.called)
+
def mock_setup_actor(self, cloud_node, arv_node):
- setup = mock.MagicMock(name='setup_node_mock')
- setup.actor_ref = self.node_setup.start().proxy().actor_ref
+ setup = self.node_setup.start().proxy()
self.node_setup.reset_mock()
setup.actor_urn = cloud_node.id
setup.cloud_node.get.return_value = cloud_node
@@ -125,17 +146,16 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
# get the "node up" message from CloudNodeSetupActor.
cloud_node = testutil.cloud_node_mock(1)
setup = self.start_node_boot(cloud_node)
- self.daemon.update_cloud_nodes([cloud_node]).get(self.TIMEOUT)
- self.assertTrue(self.node_factory.start.called)
+ self.daemon.update_cloud_nodes([cloud_node])
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertEqual(1, self.node_factory.start.call_count)
+ self.assertEqual(1, self.monitor_count())
def test_no_duplication_when_booted_node_listed(self):
cloud_node = testutil.cloud_node_mock(2)
setup = self.start_node_boot(cloud_node, id_num=2)
self.daemon.node_up(setup)
self.daemon.update_cloud_nodes([cloud_node]).get(self.TIMEOUT)
- self.assertEqual(1, self.node_factory.start.call_count)
+ self.assertEqual(1, self.monitor_count())
def test_node_counted_after_boot_with_slow_listing(self):
# Test that, after we boot a compute node, we assume it exists
@@ -143,14 +163,9 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
# propagating tags).
setup = self.start_node_boot()
self.daemon.node_up(setup).get(self.TIMEOUT)
- self.assertTrue(self.node_factory.start.called,
- "daemon not monitoring booted node")
- self.daemon.update_cloud_nodes([])
- self.stop_proxy(self.daemon)
- self.assertEqual(1, self.node_factory.start.call_count,
- "daemon has duplicate monitors for booted node")
- self.assertFalse(self.node_factory.start().proxy().stop.called,
- "daemon prematurely stopped monitoring a new node")
+ self.assertEqual(1, self.monitor_count())
+ self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+ self.assertEqual(1, self.monitor_count())
def test_booted_unlisted_node_counted(self):
setup = self.start_node_boot(id_num=1)
@@ -163,10 +178,11 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
def test_booted_node_can_shutdown(self):
setup = self.start_node_boot()
- self.daemon.node_up(setup)
+ self.daemon.node_up(setup).get(self.TIMEOUT)
+ self.assertEqual(1, self.monitor_count())
+ monitor = self.monitor_list()[0].proxy()
self.daemon.update_server_wishlist([])
- self.daemon.node_can_shutdown(
- self.node_factory.start().proxy()).get(self.TIMEOUT)
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
self.assertTrue(self.node_shutdown.start.called,
"daemon did not shut down booted node on offer")
@@ -174,9 +190,9 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
def test_booted_node_lifecycle(self):
cloud_node = testutil.cloud_node_mock(6)
setup = self.start_node_boot(cloud_node, id_num=6)
- monitor = self.node_factory.start().proxy()
- monitor.cloud_node.get.return_value = cloud_node
- self.daemon.node_up(setup)
+ self.daemon.node_up(setup).get(self.TIMEOUT)
+ self.assertEqual(1, self.monitor_count())
+ monitor = self.monitor_list()[0].proxy()
self.daemon.update_server_wishlist([])
self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.assertTrue(self.node_shutdown.start.called,
@@ -186,7 +202,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.daemon.node_finished_shutdown(shutdown).get(self.TIMEOUT)
self.assertTrue(shutdown.stop.called,
"shutdown actor not stopped after finishing")
- self.assertTrue(monitor.stop.called,
+ self.assertTrue(monitor.actor_ref.actor_stopped.wait(self.TIMEOUT),
"monitor for booted node not stopped after shutdown")
self.daemon.update_server_wishlist(
[testutil.MockSize(2)]).get(self.TIMEOUT)
@@ -205,23 +221,22 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
cloud_node = testutil.cloud_node_mock(1)
size = testutil.MockSize(1)
self.make_daemon(cloud_nodes=[cloud_node], want_sizes=[size])
- self.daemon.node_can_shutdown(
- self.node_factory.start().proxy()).get(self.TIMEOUT)
+ self.assertEqual(1, self.monitor_count())
+ monitor = self.monitor_list()[0].proxy()
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
self.assertFalse(self.node_shutdown.start.called)
def test_shutdown_accepted_below_capacity(self):
self.make_daemon(cloud_nodes=[testutil.cloud_node_mock()])
- node_actor = self.node_factory().proxy()
- self.daemon.node_can_shutdown(node_actor).get(self.TIMEOUT)
+ self.assertEqual(1, self.monitor_count())
+ monitor = self.monitor_list()[0].proxy()
+ self.daemon.node_can_shutdown(monitor).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
self.assertTrue(self.node_shutdown.start.called)
def test_clean_shutdown_waits_for_node_setup_finish(self):
- self.make_daemon(want_sizes=[testutil.MockSize(1)])
- self.daemon.max_nodes.get(self.TIMEOUT)
- self.assertTrue(self.node_setup.start.called)
- new_node = self.node_setup.start().proxy()
+ new_node = self.start_node_boot()
self.daemon.shutdown().get(self.TIMEOUT)
self.assertTrue(new_node.stop_if_no_cloud_node.called)
self.daemon.node_up(new_node).get(self.TIMEOUT)
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits
mailing list