[arvados] updated: 2.6.0-199-g924951b42

git repository hosting git at public.arvados.org
Thu May 25 21:46:01 UTC 2023


Summary of changes:
 lib/dispatchcloud/scheduler/run_queue.go      | 16 ++++++--
 lib/dispatchcloud/scheduler/run_queue_test.go | 56 +++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 3 deletions(-)

       via  924951b429e4311d51935555fa5605825622be0c (commit)
      from  11fb3d91a28bf51803f170a53709d709941de788 (commit)

The revisions listed above that are new to this repository have
not appeared in any other notification email, so we list them in
full below.


commit 924951b429e4311d51935555fa5605825622be0c
Author: Tom Clegg <tom at curii.com>
Date:   Thu May 25 17:45:50 2023 -0400

    20511: Fix allowing too many supervisor processes.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>
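
For context, here is a minimal, self-contained Go sketch of the failure
mode this commit appears to address, judging from the commit message and
the relocated Running() call below. It is illustrative only: the ent
type, the schedule function, and the maxSupervisors parameter are
invented names, not the real scheduler API. If already-running
supervisors are not visited first, an admission loop under-counts them
and lets too many new supervisors start:

package main

import (
	"fmt"
	"sort"
)

// ent stands in for a queued or running container; not an Arvados type.
type ent struct {
	uuid       string
	priority   int64
	running    bool
	supervisor bool
}

// schedule admits new containers subject to a supervisor cap, loosely
// mirroring what runQueue's "tryrun" loop does.
func schedule(queue []ent, maxSupervisors int) []string {
	// The essence of the fix: visit already-running containers
	// first, so running supervisors are counted against the cap
	// before any new supervisor is admitted.
	sort.Slice(queue, func(i, j int) bool {
		if queue[i].running != queue[j].running {
			return queue[i].running
		}
		return queue[i].priority > queue[j].priority
	})
	supervisors := 0
	var started []string
	for _, c := range queue {
		if c.supervisor {
			supervisors++
			if supervisors > maxSupervisors && !c.running {
				continue // over the cap; don't start more
			}
		}
		if !c.running {
			started = append(started, c.uuid)
		}
	}
	return started
}

func main() {
	queue := []ent{
		{"new-1", 300, false, true},
		{"new-2", 200, false, true},
		{"run-1", 100, true, true},
		{"run-2", 50, true, true},
	}
	// Two supervisors already run; with a cap of 3, only one new one
	// may start. Without the running-first sort, both new entries
	// would be admitted before the running pair was counted, giving
	// four supervisors in total.
	fmt.Println(schedule(queue, 3)) // [new-1]
}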

diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index adf897789..dcb348878 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -16,12 +16,25 @@ import (
 var quietAfter503 = time.Minute
 
 func (sch *Scheduler) runQueue() {
+	running := sch.pool.Running()
+	unalloc := sch.pool.Unallocated()
+
 	unsorted, _ := sch.queue.Entries()
 	sorted := make([]container.QueueEnt, 0, len(unsorted))
 	for _, ent := range unsorted {
 		sorted = append(sorted, ent)
 	}
 	sort.Slice(sorted, func(i, j int) bool {
+		_, irunning := running[sorted[i].Container.UUID]
+		_, jrunning := running[sorted[j].Container.UUID]
+		if irunning != jrunning {
+			// Ensure the "tryrun" loop (see below) sees
+			// already-scheduled containers first, so that
+			// existing supervisor containers are counted
+			// before we decide whether there is room for
+			// new ones.
+			return irunning
+		}
 		ilocked := sorted[i].Container.State == arvados.ContainerStateLocked
 		jlocked := sorted[j].Container.State == arvados.ContainerStateLocked
 		if ilocked != jlocked {
@@ -46,9 +59,6 @@ func (sch *Scheduler) runQueue() {
 		}
 	})
 
-	running := sch.pool.Running()
-	unalloc := sch.pool.Unallocated()
-
 	if t := sch.client.Last503(); t.After(sch.last503time) {
 		// API has sent an HTTP 503 response since last time
 		// we checked. Use current #containers - 1 as
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index 8192d4721..73602f810 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -430,6 +430,62 @@ func (*SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {
 	})
 }
 
+// Assuming we're not at quota, don't try to shut down idle nodes
+// merely because we have more queued/locked supervisor containers
+// than MaxSupervisors -- it won't help.
+func (*SchedulerSuite) TestExcessSupervisors(c *check.C) {
+	ctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))
+	queue := test.Queue{
+		ChooseType: chooseType,
+	}
+	for i := 1; i <= 8; i++ {
+		queue.Containers = append(queue.Containers, arvados.Container{
+			UUID:     test.ContainerUUID(i),
+			Priority: int64(1000 + i),
+			State:    arvados.ContainerStateQueued,
+			RuntimeConstraints: arvados.RuntimeConstraints{
+				VCPUs: 2,
+				RAM:   2 << 30,
+			},
+			SchedulingParameters: arvados.SchedulingParameters{
+				Supervisor: true,
+			},
+		})
+	}
+	for i := 2; i < 4; i++ {
+		queue.Containers[i].State = arvados.ContainerStateLocked
+	}
+	for i := 4; i < 6; i++ {
+		queue.Containers[i].State = arvados.ContainerStateRunning
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 16,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(2): 2,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(2): 1,
+		},
+		running: map[string]time.Time{
+			test.ContainerUUID(5): {},
+			test.ContainerUUID(6): {},
+		},
+		creates:   []arvados.InstanceType{},
+		starts:    []string{},
+		canCreate: 0,
+	}
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 4)
+	sch.sync()
+	sch.runQueue()
+	sch.sync()
+
+	c.Check(pool.starts, check.HasLen, 2)
+	c.Check(pool.shutdowns, check.Equals, 0)
+	c.Check(pool.creates, check.HasLen, 0)
+	c.Check(queue.StateChanges(), check.HasLen, 0)
+}
+
 // Don't flap lock/unlock when equal-priority containers compete for
 // limited workers.
 //

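The patched comparator layers its keys most significant first, falling
through on ties: running, then locked, then (per the surrounding code)
priority. A generic illustration of that multi-key pattern follows; the
job type here is invented for the example, not Arvados code:

package main

import (
	"fmt"
	"sort"
)

type job struct {
	running  bool
	locked   bool
	priority int64
}

func main() {
	jobs := []job{
		{false, false, 900},
		{true, false, 100},
		{false, true, 500},
	}
	sort.Slice(jobs, func(i, j int) bool {
		if jobs[i].running != jobs[j].running {
			return jobs[i].running // running first
		}
		if jobs[i].locked != jobs[j].locked {
			return jobs[i].locked // then locked
		}
		return jobs[i].priority > jobs[j].priority // then priority
	})
	fmt.Println(jobs) // [{true false 100} {false true 500} {false false 900}]
}

Note that sort.Slice is not guaranteed to be stable, so any ordering
that matters must be expressed as an explicit key, as the scheduler's
comparator does.
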
-----------------------------------------------------------------------


hooks/post-receive
-- 