[ARVADOS] created: 2.1.0-872-g89f1ed83b

Git user git@public.arvados.org
Thu Sep 9 18:19:25 UTC 2021


        at  89f1ed83b7bf2b68254d8f1736a264fdfcf565a3 (commit)


commit 89f1ed83b7bf2b68254d8f1736a264fdfcf565a3
Author: Tom Clegg <tom@curii.com>
Date:   Mon Sep 6 15:48:13 2021 -0400

    18102: Avoid spamming logs in case of version mismatch.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>
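The hunk below stops the scheduler from repeating the "BUG: unexpected state" error on every sync() pass by guarding it with a package-level flag. A rough standalone sketch of the same log-once idea (the package and helper names are illustrative, and it substitutes sync.Once for the patch's bare bool):

package sketch

import (
	"sync"

	"github.com/sirupsen/logrus"
)

var reportOnce sync.Once

// reportUnexpectedState emits the "BUG: unexpected state" error at most once
// per process, so a dispatcher/API version mismatch cannot flood the logs on
// every scheduler pass.
func reportUnexpectedState(logger logrus.FieldLogger, uuid string, state interface{}) {
	reportOnce.Do(func() {
		logger.WithFields(logrus.Fields{
			"ContainerUUID": uuid,
			"State":         state,
		}).Error("BUG: unexpected state")
	})
}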

diff --git a/lib/dispatchcloud/scheduler/sync.go b/lib/dispatchcloud/scheduler/sync.go
index fc683505f..038b4c8c5 100644
--- a/lib/dispatchcloud/scheduler/sync.go
+++ b/lib/dispatchcloud/scheduler/sync.go
@@ -13,6 +13,8 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+var reportedUnexpectedState = false
+
 // sync resolves discrepancies between the queue and the pool:
 //
 // Lingering crunch-run processes for finalized and unlocked/requeued
@@ -82,10 +84,13 @@ func (sch *Scheduler) sync() {
 				go sch.requeue(ent, "priority=0")
 			}
 		default:
-			sch.logger.WithFields(logrus.Fields{
-				"ContainerUUID": uuid,
-				"State":         ent.Container.State,
-			}).Error("BUG: unexpected state")
+			if !reportedUnexpectedState {
+				sch.logger.WithFields(logrus.Fields{
+					"ContainerUUID": uuid,
+					"State":         ent.Container.State,
+				}).Error("BUG: unexpected state")
+				reportedUnexpectedState = true
+			}
 		}
 	}
 	for uuid := range running {

commit 112b077315209bfa1f7b54bc7a60cc1fe3d92d82
Author: Tom Clegg <tom@curii.com>
Date:   Mon Sep 6 21:51:40 2021 -0400

    18102: Fix flapping lock/unlock.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>
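The core of this fix is the secondary sort key added to run_queue.go below: sort by Priority descending, then break ties on FirstSeenAt ascending, so equal-priority containers keep a stable order from one runQueue() pass to the next instead of flapping between lock and unlock. A self-contained sketch of that ordering (the ent type and the data are made up for illustration):

package main

import (
	"fmt"
	"sort"
	"time"
)

// ent stands in for the dispatcher's QueueEnt; only the fields relevant to
// the ordering are shown.
type ent struct {
	UUID        string
	Priority    int
	FirstSeenAt time.Time
}

func main() {
	t0 := time.Now()
	queue := []ent{
		{UUID: "ctr-b", Priority: 333, FirstSeenAt: t0.Add(2 * time.Second)},
		{UUID: "ctr-a", Priority: 333, FirstSeenAt: t0.Add(1 * time.Second)},
		{UUID: "ctr-c", Priority: 500, FirstSeenAt: t0.Add(3 * time.Second)},
	}
	sort.Slice(queue, func(i, j int) bool {
		if pi, pj := queue[i].Priority, queue[j].Priority; pi != pj {
			return pi > pj // higher priority first
		}
		// Equal priority: first-seen wins, so the front of the queue
		// does not change between scheduler passes.
		return queue[i].FirstSeenAt.Before(queue[j].FirstSeenAt)
	})
	for _, e := range queue {
		fmt.Println(e.UUID, e.Priority)
	}
	// Prints ctr-c (priority 500), then ctr-a, then ctr-b: the two
	// priority-333 entries always come out in first-seen order.
}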

diff --git a/lib/dispatchcloud/container/queue.go b/lib/dispatchcloud/container/queue.go
index 7a2727c1e..938ef915f 100644
--- a/lib/dispatchcloud/container/queue.go
+++ b/lib/dispatchcloud/container/queue.go
@@ -31,6 +31,7 @@ type QueueEnt struct {
 	// populated.
 	Container    arvados.Container    `json:"container"`
 	InstanceType arvados.InstanceType `json:"instance_type"`
+	FirstSeenAt  time.Time            `json:"first_seen_at"`
 }
 
 // String implements fmt.Stringer by returning the queued container's
@@ -229,6 +230,7 @@ func (cq *Queue) delEnt(uuid string, state arvados.ContainerState) {
 	delete(cq.current, uuid)
 }
 
+// Caller must have lock.
 func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
 	it, err := cq.chooseType(&ctr)
 	if err != nil && (ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked) {
@@ -284,7 +286,7 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
 		"Priority":      ctr.Priority,
 		"InstanceType":  it.Name,
 	}).Info("adding container to queue")
-	cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it}
+	cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it, FirstSeenAt: time.Now()}
 }
 
 // Lock acquires the dispatch lock for the given container.
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index d2f6d1c2c..5bd4477ca 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -20,7 +20,16 @@ func (sch *Scheduler) runQueue() {
 		sorted = append(sorted, ent)
 	}
 	sort.Slice(sorted, func(i, j int) bool {
-		return sorted[i].Container.Priority > sorted[j].Container.Priority
+		if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
+			return pi > pj
+		} else {
+			// When containers have identical priority,
+			// start them in the order we first noticed
+			// them. This avoids extra lock/unlock cycles
+			// when we unlock the containers that don't
+			// fit in the available pool.
+			return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
+		}
 	})
 
 	running := sch.pool.Running()
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index c8d45cbd1..5b5fa960a 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -263,6 +263,65 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 	}
 }
 
+// Don't flap lock/unlock when equal-priority containers compete for
+// limited workers.
+//
+// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
+// tends to choose a different one of the equal-priority containers as
+// the "first" one that should be locked, and unlock the one it chose
+// last time. This generates logging noise, and fails containers by
+// reaching MaxDispatchAttempts quickly.)
+func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
+	logger := ctxlog.TestLogger(c)
+	ctx := ctxlog.Context(context.Background(), logger)
+	queue := test.Queue{
+		ChooseType: chooseType,
+		Logger:     logger,
+	}
+	for i := 0; i < 8; i++ {
+		queue.Containers = append(queue.Containers, arvados.Container{
+			UUID:     test.ContainerUUID(i),
+			Priority: 333,
+			State:    arvados.ContainerStateQueued,
+			RuntimeConstraints: arvados.RuntimeConstraints{
+				VCPUs: 3,
+				RAM:   3 << 30,
+			},
+		})
+	}
+	queue.Update()
+	pool := stubPool{
+		quota: 2,
+		unalloc: map[arvados.InstanceType]int{
+			test.InstanceType(3): 1,
+		},
+		idle: map[arvados.InstanceType]int{
+			test.InstanceType(3): 1,
+		},
+		running:   map[string]time.Time{},
+		creates:   []arvados.InstanceType{},
+		starts:    []string{},
+		canCreate: 1,
+	}
+	sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+	for i := 0; i < 30; i++ {
+		sch.runQueue()
+		sch.sync()
+		time.Sleep(time.Millisecond)
+	}
+	c.Check(pool.shutdowns, check.Equals, 0)
+	c.Check(pool.starts, check.HasLen, 1)
+	unlocked := map[string]int{}
+	for _, chg := range queue.StateChanges() {
+		if chg.To == arvados.ContainerStateQueued {
+			unlocked[chg.UUID]++
+		}
+	}
+	for uuid, count := range unlocked {
+		c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
+	}
+}
+
 // Start lower-priority containers while waiting for new/existing
 // workers to come up for higher-priority containers.
 func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
diff --git a/lib/dispatchcloud/test/queue.go b/lib/dispatchcloud/test/queue.go
index 5973d1639..fcb2cfb33 100644
--- a/lib/dispatchcloud/test/queue.go
+++ b/lib/dispatchcloud/test/queue.go
@@ -160,6 +160,7 @@ func (q *Queue) Update() error {
 			upd[ctr.UUID] = container.QueueEnt{
 				Container:    ctr,
 				InstanceType: it,
+				FirstSeenAt:  time.Now(),
 			}
 		}
 	}

commit 10889ac0f61add4f51ce832c8ecd76cc1a426bfd
Author: Tom Clegg <tom@curii.com>
Date:   Mon Sep 6 14:47:03 2021 -0400

    18102: Fix double-unlock in scheduler.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>
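The test changes below record every Lock/Unlock/Cancel transition made through the stub queue, so the test can assert the exact unlock sequence and catch a container being unlocked twice. A rough sketch of that record-and-count idea, with hypothetical names standing in for the test helpers:

package main

import "fmt"

type stateChange struct {
	UUID, From, To string
}

// fakeQueue mimics the idea of the test double: every transition is appended
// to a list the test can inspect afterwards.
type fakeQueue struct {
	changes []stateChange
}

func (q *fakeQueue) changeState(uuid, from, to string) {
	q.changes = append(q.changes, stateChange{uuid, from, to})
}

func main() {
	q := &fakeQueue{}
	q.changeState("ctr-1", "Locked", "Queued")
	q.changeState("ctr-1", "Locked", "Queued") // a second unlock would indicate the bug

	unlocked := map[string]int{}
	for _, chg := range q.changes {
		if chg.To == "Queued" {
			unlocked[chg.UUID]++
		}
	}
	// Any count above 1 means the scheduler unlocked the same container twice.
	fmt.Println(unlocked)
}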

diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index b9d653a82..d2f6d1c2c 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -66,8 +66,7 @@ tryrun:
 				// starve this one by keeping
 				// idle workers alive on different
 				// instance types.
-				logger.Debug("unlocking: AtQuota and no unalloc workers")
-				sch.queue.Unlock(ctr.UUID)
+				logger.Debug("overquota")
 				overquota = sorted[i:]
 				break tryrun
 			} else if logger.Info("creating new instance"); sch.pool.Create(it) {
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index fd1d0a870..c8d45cbd1 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -244,15 +244,22 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 			starts:    []string{},
 			canCreate: 0,
 		}
-		New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+		sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+		sch.runQueue()
+		sch.sync()
+		sch.runQueue()
+		sch.sync()
 		c.Check(pool.creates, check.DeepEquals, shouldCreate)
 		if len(shouldCreate) == 0 {
 			c.Check(pool.starts, check.DeepEquals, []string{})
-			c.Check(pool.shutdowns, check.Not(check.Equals), 0)
 		} else {
 			c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
-			c.Check(pool.shutdowns, check.Equals, 0)
 		}
+		c.Check(pool.shutdowns, check.Equals, 3-quota)
+		c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+			{UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
+			{UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
+		})
 	}
 }
 
diff --git a/lib/dispatchcloud/test/queue.go b/lib/dispatchcloud/test/queue.go
index 3598ec6da..5973d1639 100644
--- a/lib/dispatchcloud/test/queue.go
+++ b/lib/dispatchcloud/test/queue.go
@@ -26,13 +26,27 @@ type Queue struct {
 
 	Logger logrus.FieldLogger
 
-	entries     map[string]container.QueueEnt
-	updTime     time.Time
-	subscribers map[<-chan struct{}]chan struct{}
+	entries      map[string]container.QueueEnt
+	updTime      time.Time
+	subscribers  map[<-chan struct{}]chan struct{}
+	stateChanges []QueueStateChange
 
 	mtx sync.Mutex
 }
 
+type QueueStateChange struct {
+	UUID string
+	From arvados.ContainerState
+	To   arvados.ContainerState
+}
+
+// All calls to Lock/Unlock/Cancel to date.
+func (q *Queue) StateChanges() []QueueStateChange {
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+	return q.stateChanges
+}
+
 // Entries returns the containers that were queued when Update was
 // last called.
 func (q *Queue) Entries() (map[string]container.QueueEnt, time.Time) {
@@ -111,6 +125,7 @@ func (q *Queue) notify() {
 // caller must have lock.
 func (q *Queue) changeState(uuid string, from, to arvados.ContainerState) error {
 	ent := q.entries[uuid]
+	q.stateChanges = append(q.stateChanges, QueueStateChange{uuid, from, to})
 	if ent.Container.State != from {
 		return fmt.Errorf("changeState failed: state=%q", ent.Container.State)
 	}

-----------------------------------------------------------------------


hooks/post-receive
-- 