[ARVADOS] created: fb2099b1de222b4aa05dd2ce12654ff32da3c18d
git at public.curoverse.com
Fri Dec 19 12:09:20 EST 2014
at fb2099b1de222b4aa05dd2ce12654ff32da3c18d (commit)
commit fb2099b1de222b4aa05dd2ce12654ff32da3c18d
Author: Brett Smith <brett at curoverse.com>
Date: Fri Dec 19 12:09:17 2014 -0500
4844: Node Manager doesn't treat min_nodes as min_nodes_idle.
There's a bad interaction between the past bugfixes to (a) implement
min_nodes, and (b) boot new nodes when existing nodes are busy.
Because min_nodes has been implemented at the server wishlist level in
the past, the daemon can't distinguish between "nodes requested to
fulfill min_nodes" and "nodes requested to fulfill jobs."
This commit puts all the responsibility for enforcing min_nodes in the
daemon, so that the server wishlist always represents real job
requirements. This lets the daemon correctly decide whether or not to
boot a new node when >= min_nodes are busy.
diff --git a/services/nodemanager/arvnodeman/daemon.py b/services/nodemanager/arvnodeman/daemon.py
index 53af933..0e48078 100644
--- a/services/nodemanager/arvnodeman/daemon.py
+++ b/services/nodemanager/arvnodeman/daemon.py
@@ -97,7 +97,7 @@ class NodeManagerDaemonActor(actor_class):
def __init__(self, server_wishlist_actor, arvados_nodes_actor,
cloud_nodes_actor, cloud_update_actor, timer_actor,
arvados_factory, cloud_factory,
- shutdown_windows, min_nodes, max_nodes,
+ shutdown_windows, min_size, min_nodes, max_nodes,
poll_stale_after=600,
boot_fail_after=1800,
node_stale_after=7200,
@@ -116,6 +116,7 @@ class NodeManagerDaemonActor(actor_class):
self._logger = logging.getLogger('arvnodeman.daemon')
self._later = self.actor_ref.proxy()
self.shutdown_windows = shutdown_windows
+ self.min_cloud_size = min_size
self.min_nodes = min_nodes
self.max_nodes = max_nodes
self.poll_stale_after = poll_stale_after
@@ -207,9 +208,12 @@ class NodeManagerDaemonActor(actor_class):
def _nodes_wanted(self):
up_count = self._nodes_up()
+ under_min = self.min_nodes - up_count
over_max = up_count - self.max_nodes
if over_max >= 0:
return -over_max
+ elif under_min > 0:
+ return under_min
else:
up_count -= len(self.shutdowns) + self._nodes_busy()
return len(self.last_wishlist) - up_count
@@ -254,7 +258,10 @@ class NodeManagerDaemonActor(actor_class):
if nodes_wanted < 1:
return None
arvados_node = self.arvados_nodes.find_stale_node(self.node_stale_after)
- cloud_size = self.last_wishlist[nodes_wanted - 1]
+ try:
+ cloud_size = self.last_wishlist[self._nodes_up()]
+ except IndexError:
+ cloud_size = self.min_cloud_size
self._logger.info("Want %s more nodes. Booting a %s node.",
nodes_wanted, cloud_size.name)
new_setup = self._node_setup.start(
diff --git a/services/nodemanager/arvnodeman/jobqueue.py b/services/nodemanager/arvnodeman/jobqueue.py
index 239934f..06f66b7 100644
--- a/services/nodemanager/arvnodeman/jobqueue.py
+++ b/services/nodemanager/arvnodeman/jobqueue.py
@@ -38,11 +38,10 @@ class ServerCalculator(object):
return True
- def __init__(self, server_list, min_nodes=0, max_nodes=None):
+ def __init__(self, server_list, max_nodes=None):
self.cloud_sizes = [self.CloudSizeWrapper(s, **kws)
for s, kws in server_list]
self.cloud_sizes.sort(key=lambda s: s.price)
- self.min_nodes = min_nodes
self.max_nodes = max_nodes or float('inf')
self.logger = logging.getLogger('arvnodeman.jobqueue')
self.logged_jobs = set()
@@ -79,15 +78,11 @@ class ServerCalculator(object):
elif (want_count <= self.max_nodes):
servers.extend([cloud_size.real] * max(1, want_count))
self.logged_jobs.intersection_update(seen_jobs)
-
- # Make sure the server queue has at least enough entries to
- # satisfy min_nodes.
- node_shortfall = self.min_nodes - len(servers)
- if node_shortfall > 0:
- basic_node = self.cloud_size_for_constraints({})
- servers.extend([basic_node.real] * node_shortfall)
return servers
+ def cheapest_size(self):
+ return self.cloud_sizes[0]
+
class JobQueueMonitorActor(clientactor.RemotePollLoopActor):
"""Actor to generate server wishlists from the job queue.
diff --git a/services/nodemanager/arvnodeman/launcher.py b/services/nodemanager/arvnodeman/launcher.py
index 5fa404f..8801582 100644
--- a/services/nodemanager/arvnodeman/launcher.py
+++ b/services/nodemanager/arvnodeman/launcher.py
@@ -57,25 +57,22 @@ def setup_logging(path, level, **sublevels):
sublogger = logging.getLogger(logger_name)
sublogger.setLevel(sublevel)
-def launch_pollers(config):
- cloud_client = config.new_cloud_client()
- arvados_client = config.new_arvados_client()
- cloud_size_list = config.node_sizes(cloud_client.list_sizes())
+def build_server_calculator(config):
+ cloud_size_list = config.node_sizes(config.new_cloud_client().list_sizes())
if not cloud_size_list:
abort("No valid node sizes configured")
+ return ServerCalculator(cloud_size_list,
+ config.getint('Daemon', 'max_nodes'))
- server_calculator = ServerCalculator(
- cloud_size_list,
- config.getint('Daemon', 'min_nodes'),
- config.getint('Daemon', 'max_nodes'))
+def launch_pollers(config, server_calculator):
poll_time = config.getint('Daemon', 'poll_time')
max_poll_time = config.getint('Daemon', 'max_poll_time')
timer = TimedCallBackActor.start(poll_time / 10.0).proxy()
cloud_node_poller = CloudNodeListMonitorActor.start(
- cloud_client, timer, poll_time, max_poll_time).proxy()
+ config.new_cloud_client(), timer, poll_time, max_poll_time).proxy()
arvados_node_poller = ArvadosNodeListMonitorActor.start(
- arvados_client, timer, poll_time, max_poll_time).proxy()
+ config.new_arvados_client(), timer, poll_time, max_poll_time).proxy()
job_queue_poller = JobQueueMonitorActor.start(
config.new_arvados_client(), timer, server_calculator,
poll_time, max_poll_time).proxy()
@@ -108,14 +105,16 @@ def main(args=None):
setup_logging(config.get('Logging', 'file'), **config.log_levels())
node_setup, node_shutdown, node_update, node_monitor = \
config.dispatch_classes()
+ server_calculator = build_server_calculator(config)
timer, cloud_node_poller, arvados_node_poller, job_queue_poller = \
- launch_pollers(config)
+ launch_pollers(config, server_calculator)
cloud_node_updater = node_update.start(config.new_cloud_client).proxy()
node_daemon = NodeManagerDaemonActor.start(
job_queue_poller, arvados_node_poller, cloud_node_poller,
cloud_node_updater, timer,
config.new_arvados_client, config.new_cloud_client,
config.shutdown_windows(),
+ server_calculator.cheapest_size(),
config.getint('Daemon', 'min_nodes'),
config.getint('Daemon', 'max_nodes'),
config.getint('Daemon', 'poll_stale_after'),
diff --git a/services/nodemanager/tests/test_daemon.py b/services/nodemanager/tests/test_daemon.py
index ccd71d5..96fcde9 100644
--- a/services/nodemanager/tests/test_daemon.py
+++ b/services/nodemanager/tests/test_daemon.py
@@ -14,8 +14,14 @@ from . import testutil
class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
unittest.TestCase):
+ def new_setup_proxy(self):
+ # Make sure that every time the daemon starts a setup actor,
+ # it gets a new mock object back.
+ self.last_setup = mock.MagicMock(name='setup_proxy_mock')
+ return self.last_setup
+
def make_daemon(self, cloud_nodes=[], arvados_nodes=[], want_sizes=[],
- min_nodes=0, max_nodes=8):
+ min_size=testutil.MockSize(1), min_nodes=0, max_nodes=8):
for name in ['cloud_nodes', 'arvados_nodes', 'server_wishlist']:
setattr(self, name + '_poller', mock.MagicMock(name=name + '_mock'))
self.arv_factory = mock.MagicMock(name='arvados_mock')
@@ -24,12 +30,14 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.cloud_updates = mock.MagicMock(name='updates_mock')
self.timer = testutil.MockTimer(deliver_immediately=False)
self.node_setup = mock.MagicMock(name='setup_mock')
+ self.node_setup.start().proxy.side_effect = self.new_setup_proxy
+ self.node_setup.reset_mock()
self.node_shutdown = mock.MagicMock(name='shutdown_mock')
self.daemon = nmdaemon.NodeManagerDaemonActor.start(
self.server_wishlist_poller, self.arvados_nodes_poller,
self.cloud_nodes_poller, self.cloud_updates, self.timer,
self.arv_factory, self.cloud_factory,
- [54, 5, 1], min_nodes, max_nodes, 600, 1800, 3600,
+ [54, 5, 1], min_size, min_nodes, max_nodes, 600, 1800, 3600,
self.node_setup, self.node_shutdown).proxy()
if cloud_nodes is not None:
self.daemon.update_cloud_nodes(cloud_nodes).get(self.TIMEOUT)
@@ -91,12 +99,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
arv_node = testutil.arvados_node_mock(3, age=9000)
size = testutil.MockSize(3)
self.make_daemon(arvados_nodes=[arv_node])
- setup_ref = self.node_setup.start().proxy().actor_ref
- setup_ref.actor_urn = 0
- self.node_setup.start.reset_mock()
self.daemon.update_server_wishlist([size]).get(self.TIMEOUT)
- self.daemon.max_nodes.get(self.TIMEOUT)
- setup_ref.actor_urn += 1
self.daemon.update_server_wishlist([size, size]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
used_nodes = [call[1].get('arvados_node')
@@ -129,6 +132,26 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.stop_proxy(self.daemon)
self.assertTrue(self.node_setup.start.called)
+ def test_boot_new_node_below_min_nodes(self):
+ min_size = testutil.MockSize(1)
+ wish_size = testutil.MockSize(3)
+ self.make_daemon([], [], None, min_size=min_size, min_nodes=2)
+ self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+ self.daemon.update_cloud_nodes([]).get(self.TIMEOUT)
+ self.daemon.update_server_wishlist([wish_size]).get(self.TIMEOUT)
+ self.stop_proxy(self.daemon)
+ self.assertEqual([wish_size, min_size],
+ [call[1].get('cloud_size')
+ for call in self.node_setup.start.call_args_list])
+
+ def test_no_new_node_when_ge_min_nodes_busy(self):
+ cloud_nodes = [testutil.cloud_node_mock(n) for n in range(1, 4)]
+ arv_nodes = [testutil.arvados_node_mock(n, job_uuid=True)
+ for n in range(1, 4)]
+ self.make_daemon(cloud_nodes, arv_nodes, [], min_nodes=2)
+ self.stop_proxy(self.daemon)
+ self.assertEqual(0, self.node_setup.start.call_count)
+
def test_no_new_node_when_max_nodes_busy(self):
self.make_daemon([testutil.cloud_node_mock(3)],
[testutil.arvados_node_mock(3, job_uuid=True)],
@@ -137,14 +160,6 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.stop_proxy(self.daemon)
self.assertFalse(self.node_setup.start.called)
- def mock_setup_actor(self, cloud_node, arv_node):
- setup = self.node_setup.start().proxy()
- self.node_setup.reset_mock()
- setup.actor_urn = cloud_node.id
- setup.cloud_node.get.return_value = cloud_node
- setup.arvados_node.get.return_value = arv_node
- return setup
-
def start_node_boot(self, cloud_node=None, arv_node=None, id_num=1):
if cloud_node is None:
cloud_node = testutil.cloud_node_mock(id_num)
@@ -153,7 +168,9 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.make_daemon(want_sizes=[testutil.MockSize(id_num)])
self.daemon.max_nodes.get(self.TIMEOUT)
self.assertEqual(1, self.node_setup.start.call_count)
- return self.mock_setup_actor(cloud_node, arv_node)
+ self.last_setup.cloud_node.get.return_value = cloud_node
+ self.last_setup.arvados_node.get.return_value = arv_node
+ return self.last_setup
def test_no_duplication_when_booting_node_listed_fast(self):
# Test that we don't start two ComputeNodeMonitorActors when
@@ -188,8 +205,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.daemon.update_server_wishlist(
[testutil.MockSize(1)]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
- self.assertFalse(self.node_setup.start.called,
- "daemon did not count booted node toward wishlist")
+ self.assertEqual(1, self.node_setup.start.call_count)
def test_booted_node_can_shutdown(self):
setup = self.start_node_boot()
@@ -259,8 +275,7 @@ class NodeManagerDaemonActorTestCase(testutil.ActorTestMixin,
self.make_daemon(want_sizes=[testutil.MockSize(1)])
self.daemon.update_server_wishlist([]).get(self.TIMEOUT)
self.stop_proxy(self.daemon)
- self.assertTrue(
- self.node_setup.start().proxy().stop_if_no_cloud_node.called)
+ self.assertTrue(self.last_setup.stop_if_no_cloud_node.called)
def test_shutdown_declined_at_wishlist_capacity(self):
cloud_node = testutil.cloud_node_mock(1)
diff --git a/services/nodemanager/tests/test_jobqueue.py b/services/nodemanager/tests/test_jobqueue.py
index ae5bf1e..4c97aed 100644
--- a/services/nodemanager/tests/test_jobqueue.py
+++ b/services/nodemanager/tests/test_jobqueue.py
@@ -48,29 +48,15 @@ class ServerCalculatorTestCase(unittest.TestCase):
{'min_scratch_mb_per_node': 200})
self.assertEqual(6, len(servlist))
- def test_server_calc_min_nodes_0_jobs(self):
- servcalc = self.make_calculator([1], min_nodes=3, max_nodes=9)
- servlist = self.calculate(servcalc, {})
- self.assertEqual(3, len(servlist))
-
- def test_server_calc_min_nodes_1_job(self):
- servcalc = self.make_calculator([1], min_nodes=3, max_nodes=9)
- servlist = self.calculate(servcalc, {'min_nodes': 1})
- self.assertEqual(3, len(servlist))
-
- def test_server_calc_more_jobs_than_min_nodes(self):
- servcalc = self.make_calculator([1], min_nodes=2, max_nodes=9)
- servlist = self.calculate(servcalc,
- {'min_nodes': 1},
- {'min_nodes': 1},
- {'min_nodes': 1})
- self.assertEqual(3, len(servlist))
-
def test_job_requesting_max_nodes_accepted(self):
servcalc = self.make_calculator([1], max_nodes=4)
servlist = self.calculate(servcalc, {'min_nodes': 4})
self.assertEqual(4, len(servlist))
+ def test_cheapest_size(self):
+ servcalc = self.make_calculator([2, 4, 1, 3])
+ self.assertEqual(testutil.MockSize(1), servcalc.cheapest_size())
+
class JobQueueMonitorActorTestCase(testutil.RemotePollLoopActorTestMixin,
unittest.TestCase):
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits
mailing list