[ARVADOS] created: 2.1.0-1288-g2a8b64963
Git user <git at public.arvados.org>
Tue Sep 7 02:20:45 UTC 2021
at 2a8b64963448dd8d7613a8ecdd9a6d1e12152fca (commit)
commit 2a8b64963448dd8d7613a8ecdd9a6d1e12152fca
Author: Tom Clegg <tom at curii.com>
Date: Mon Sep 6 21:51:40 2021 -0400
18102: Fix flapping lock/unlock.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>
diff --git a/lib/dispatchcloud/container/queue.go b/lib/dispatchcloud/container/queue.go
index 7a2727c1e..938ef915f 100644
--- a/lib/dispatchcloud/container/queue.go
+++ b/lib/dispatchcloud/container/queue.go
@@ -31,6 +31,7 @@ type QueueEnt struct {
// populated.
Container arvados.Container `json:"container"`
InstanceType arvados.InstanceType `json:"instance_type"`
+ FirstSeenAt time.Time `json:"first_seen_at"`
}
// String implements fmt.Stringer by returning the queued container's
@@ -229,6 +230,7 @@ func (cq *Queue) delEnt(uuid string, state arvados.ContainerState) {
delete(cq.current, uuid)
}
+// Caller must have lock.
func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
it, err := cq.chooseType(&ctr)
if err != nil && (ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked) {
@@ -284,7 +286,7 @@ func (cq *Queue) addEnt(uuid string, ctr arvados.Container) {
"Priority": ctr.Priority,
"InstanceType": it.Name,
}).Info("adding container to queue")
- cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it}
+ cq.current[uuid] = QueueEnt{Container: ctr, InstanceType: it, FirstSeenAt: time.Now()}
}
// Lock acquires the dispatch lock for the given container.
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index f11f35e1b..bf943a22c 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -27,7 +27,16 @@ func (sch *Scheduler) runQueue() {
sorted = append(sorted, ent)
}
sort.Slice(sorted, func(i, j int) bool {
- return sorted[i].Container.Priority > sorted[j].Container.Priority
+ if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {
+ return pi > pj
+ } else {
+ // When containers have identical priority,
+ // start them in the order we first noticed
+ // them. This avoids extra lock/unlock cycles
+ // when we unlock the containers that don't
+ // fit in the available pool.
+ return sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)
+ }
})
running := sch.pool.Running()
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index c8d45cbd1..5b5fa960a 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -263,6 +263,65 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
}
}
+// Don't flap lock/unlock when equal-priority containers compete for
+// limited workers.
+//
+// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()
+// tends to choose a different one of the equal-priority containers as
+// the "first" one that should be locked, and unlock the one it chose
+// last time. This generates logging noise, and fails containers by
+// reaching MaxDispatchAttempts quickly.)
+func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
+ logger := ctxlog.TestLogger(c)
+ ctx := ctxlog.Context(context.Background(), logger)
+ queue := test.Queue{
+ ChooseType: chooseType,
+ Logger: logger,
+ }
+ for i := 0; i < 8; i++ {
+ queue.Containers = append(queue.Containers, arvados.Container{
+ UUID: test.ContainerUUID(i),
+ Priority: 333,
+ State: arvados.ContainerStateQueued,
+ RuntimeConstraints: arvados.RuntimeConstraints{
+ VCPUs: 3,
+ RAM: 3 << 30,
+ },
+ })
+ }
+ queue.Update()
+ pool := stubPool{
+ quota: 2,
+ unalloc: map[arvados.InstanceType]int{
+ test.InstanceType(3): 1,
+ },
+ idle: map[arvados.InstanceType]int{
+ test.InstanceType(3): 1,
+ },
+ running: map[string]time.Time{},
+ creates: []arvados.InstanceType{},
+ starts: []string{},
+ canCreate: 1,
+ }
+ sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ for i := 0; i < 30; i++ {
+ sch.runQueue()
+ sch.sync()
+ time.Sleep(time.Millisecond)
+ }
+ c.Check(pool.shutdowns, check.Equals, 0)
+ c.Check(pool.starts, check.HasLen, 1)
+ unlocked := map[string]int{}
+ for _, chg := range queue.StateChanges() {
+ if chg.To == arvados.ContainerStateQueued {
+ unlocked[chg.UUID]++
+ }
+ }
+ for uuid, count := range unlocked {
+ c.Check(count, check.Equals, 1, check.Commentf("%s", uuid))
+ }
+}
+
// Start lower-priority containers while waiting for new/existing
// workers to come up for higher-priority containers.
func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
diff --git a/lib/dispatchcloud/test/queue.go b/lib/dispatchcloud/test/queue.go
index 5973d1639..fcb2cfb33 100644
--- a/lib/dispatchcloud/test/queue.go
+++ b/lib/dispatchcloud/test/queue.go
@@ -160,6 +160,7 @@ func (q *Queue) Update() error {
upd[ctr.UUID] = container.QueueEnt{
Container: ctr,
InstanceType: it,
+ FirstSeenAt: time.Now(),
}
}
}
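The fix works because the sort order is now deterministic across successive
runQueue() calls. A minimal standalone sketch of the same two-key ordering
(priority descending, then FirstSeenAt ascending; the types and sample values
here are illustrative, not the dispatcher's own):

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    // ent is a stand-in for container.QueueEnt: just the two sort keys.
    type ent struct {
        UUID        string
        Priority    int
        FirstSeenAt time.Time
    }

    func main() {
        t0 := time.Now()
        queue := []ent{
            {"c3", 333, t0.Add(3 * time.Second)},
            {"c1", 333, t0.Add(1 * time.Second)},
            {"c2", 500, t0.Add(2 * time.Second)},
        }
        sort.Slice(queue, func(i, j int) bool {
            // Primary key: higher priority first.
            if pi, pj := queue[i].Priority, queue[j].Priority; pi != pj {
                return pi > pj
            }
            // Tie-break: earliest-seen first, so repeated sorts
            // always pick the same "first" container.
            return queue[i].FirstSeenAt.Before(queue[j].FirstSeenAt)
        })
        for _, e := range queue {
            fmt.Println(e.UUID) // c2, c1, c3
        }
    }

Because FirstSeenAt never changes for a given container, every pass over the
queue picks the same winners, so a container unlocked in one pass is not
re-locked in the next.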
commit 7381073b9336f612f051517f33027436bd14f608
Author: Tom Clegg <tom at curii.com>
Date: Mon Sep 6 15:48:50 2021 -0400
18102: Improve logging.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index ab57a1e71..f11f35e1b 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -80,7 +80,7 @@ tryrun:
// starve this one by keeping
// idle workers alive on different
// instance types.
- logger.Debug("overquota")
+ logger.Trace("overquota")
overquota = sorted[i:]
break tryrun
} else if logger.Info("creating new instance"); sch.pool.Create(it) {
@@ -93,6 +93,7 @@ tryrun:
// avoid getting starved here if
// instances of a specific type always
// fail.
+ logger.Trace("pool declined to create new instance")
continue
}
diff --git a/lib/dispatchcloud/scheduler/sync.go b/lib/dispatchcloud/scheduler/sync.go
index 326105a16..8e380f0d6 100644
--- a/lib/dispatchcloud/scheduler/sync.go
+++ b/lib/dispatchcloud/scheduler/sync.go
@@ -80,7 +80,7 @@ func (sch *Scheduler) sync() {
// a network outage and is still
// preparing to run a container that
// has already been unlocked/requeued.
- go sch.kill(uuid, fmt.Sprintf("state=%s", ent.Container.State))
+ go sch.kill(uuid, fmt.Sprintf("pool says running, but queue says state=%s", ent.Container.State))
} else if ent.Container.Priority == 0 {
sch.logger.WithFields(logrus.Fields{
"ContainerUUID": uuid,
@@ -132,6 +132,10 @@ func (sch *Scheduler) kill(uuid string, reason string) {
return
}
defer sch.uuidUnlock(uuid)
+ sch.logger.WithFields(logrus.Fields{
+ "ContainerUUID": uuid,
+ "reason": reason,
+ }).Debug("kill")
sch.pool.KillContainer(uuid, reason)
sch.pool.ForgetContainer(uuid)
}
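Both hunks lean on logrus's leveled, structured logging: the per-cycle noise
moves down to Trace, while the kill path gains a Debug entry carrying its
reason as a field. A hedged sketch of the pattern (the logger setup and field
values are illustrative):

    package main

    import "github.com/sirupsen/logrus"

    func main() {
        logger := logrus.New()
        // At Debug level, Trace messages are dropped; this is what
        // keeps the per-cycle "overquota" lines out of normal logs.
        logger.SetLevel(logrus.DebugLevel)

        logger.WithFields(logrus.Fields{
            "ContainerUUID": "zzzzz-dz642-000000000000001",
            "reason":        "not in queue",
        }).Debug("kill") // emitted

        logger.Trace("overquota") // suppressed
    }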
commit a575b8003fc6984f04cb1a84a9693cc0565c9a46
Author: Tom Clegg <tom at curii.com>
Date: Mon Sep 6 15:48:13 2021 -0400
18102: Avoid spamming logs in case of version mismatch.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>
diff --git a/lib/dispatchcloud/scheduler/sync.go b/lib/dispatchcloud/scheduler/sync.go
index c9f325e26..326105a16 100644
--- a/lib/dispatchcloud/scheduler/sync.go
+++ b/lib/dispatchcloud/scheduler/sync.go
@@ -14,6 +14,8 @@ import (
"github.com/sirupsen/logrus"
)
+var reportedUnexpectedState = false
+
// sync resolves discrepancies between the queue and the pool:
//
// Lingering crunch-run processes for finalized and unlocked/requeued
@@ -96,10 +98,13 @@ func (sch *Scheduler) sync() {
go sch.requeue(ent, "priority=0")
}
default:
- sch.logger.WithFields(logrus.Fields{
- "ContainerUUID": uuid,
- "State": ent.Container.State,
- }).Error("BUG: unexpected state")
+ if !reportedUnexpectedState {
+ sch.logger.WithFields(logrus.Fields{
+ "ContainerUUID": uuid,
+ "State": ent.Container.State,
+ }).Error("BUG: unexpected state")
+ reportedUnexpectedState = true
+ }
}
}
for uuid := range running {
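The package-level reportedUnexpectedState flag is only touched from sync(),
which runs on a single scheduler goroutine, so a plain bool suffices here. If
concurrent callers were ever a concern, sync.Once would give the same
log-once behavior race-free; a sketch of that alternative (not the committed
code):

    import (
        "sync"

        "git.arvados.org/arvados.git/sdk/go/arvados"
        "github.com/sirupsen/logrus"
    )

    var reportUnexpectedState sync.Once

    // logUnexpectedStateOnce logs the first unexpected container state
    // and ignores all later ones, even with concurrent callers.
    func logUnexpectedStateOnce(logger logrus.FieldLogger, uuid string, state arvados.ContainerState) {
        reportUnexpectedState.Do(func() {
            logger.WithFields(logrus.Fields{
                "ContainerUUID": uuid,
                "State":         state,
            }).Error("BUG: unexpected state")
        })
    }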
commit d57fcd076035e07a4134e3f23692a6bc1cbaafb1
Author: Tom Clegg <tom at curii.com>
Date: Mon Sep 6 15:47:29 2021 -0400
18102: Avoid overlapping operations on a container.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index d2f6d1c2c..ab57a1e71 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -14,6 +14,13 @@ import (
)
func (sch *Scheduler) runQueue() {
+ sch.mtx.Lock()
+ pendingOps := make(map[string]bool, len(sch.uuidOp))
+ for uuid := range sch.uuidOp {
+ pendingOps[uuid] = true
+ }
+ sch.mtx.Unlock()
+
unsorted, _ := sch.queue.Entries()
sorted := make([]container.QueueEnt, 0, len(unsorted))
for _, ent := range unsorted {
@@ -38,6 +45,13 @@ func (sch *Scheduler) runQueue() {
tryrun:
for i, ctr := range sorted {
ctr, it := ctr.Container, ctr.InstanceType
+ if pendingOps[ctr.UUID] {
+ // Wait for unfinished lock/unlock/cancel/kill
+ // operation to settle before doing anything
+ // further with this container.
+ sch.wakeup.Reset(time.Second / 4)
+ break
+ }
logger := sch.logger.WithFields(logrus.Fields{
"ContainerUUID": ctr.UUID,
"InstanceType": it.Name,
@@ -106,13 +120,14 @@ tryrun:
// we're at quota.
for _, ctr := range overquota {
ctr := ctr.Container
- if ctr.State == arvados.ContainerStateLocked {
+ if ctr.State == arvados.ContainerStateLocked && !pendingOps[ctr.UUID] && sch.uuidLock(ctr.UUID, "unlock") {
logger := sch.logger.WithField("ContainerUUID", ctr.UUID)
logger.Debug("unlock because pool capacity is used by higher priority containers")
err := sch.queue.Unlock(ctr.UUID)
if err != nil {
logger.WithError(err).Warn("error unlocking")
}
+ sch.uuidUnlock(ctr.UUID)
}
}
// Shut down idle workers that didn't get any
diff --git a/lib/dispatchcloud/scheduler/sync.go b/lib/dispatchcloud/scheduler/sync.go
index fc683505f..c9f325e26 100644
--- a/lib/dispatchcloud/scheduler/sync.go
+++ b/lib/dispatchcloud/scheduler/sync.go
@@ -6,6 +6,7 @@ package scheduler
import (
"fmt"
+ "time"
"git.arvados.org/arvados.git/lib/dispatchcloud/container"
"git.arvados.org/arvados.git/lib/dispatchcloud/worker"
@@ -24,10 +25,23 @@ import (
// Running containers whose crunch-run processes have exited are
// cancelled.
func (sch *Scheduler) sync() {
+ sch.mtx.Lock()
+ pendingOps := make(map[string]bool, len(sch.uuidOp))
+ for uuid := range sch.uuidOp {
+ pendingOps[uuid] = true
+ }
+ sch.mtx.Unlock()
anyUnknownWorkers := sch.pool.CountWorkers()[worker.StateUnknown] > 0
running := sch.pool.Running()
qEntries, qUpdated := sch.queue.Entries()
for uuid, ent := range qEntries {
+ if pendingOps[uuid] {
+ // Wait for unfinished lock/unlock/cancel/kill
+ // operation to settle before doing anything
+ // further with this container.
+ sch.wakeup.Reset(time.Second / 4)
+ break
+ }
exited, running := running[uuid]
switch ent.Container.State {
case arvados.ContainerStateRunning:
@@ -89,7 +103,7 @@ func (sch *Scheduler) sync() {
}
}
for uuid := range running {
- if _, known := qEntries[uuid]; !known {
+ if _, known := qEntries[uuid]; !known && !pendingOps[uuid] {
go sch.kill(uuid, "not in queue")
}
}
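Both runQueue() and sync() now start by snapshotting the set of in-flight
operation UUIDs while holding sch.mtx, then consult the copy, so neither
holds the lock across queue or pool calls. A minimal sketch of the
copy-under-lock pattern (the type names are illustrative):

    package main

    import (
        "fmt"
        "sync"
    )

    type scheduler struct {
        mtx    sync.Mutex
        uuidOp map[string]string // in-flight operation per container UUID
    }

    // pendingOps returns a private copy of the in-flight set, so callers
    // can consult it after the lock is released without racing writers.
    func (sch *scheduler) pendingOps() map[string]bool {
        sch.mtx.Lock()
        defer sch.mtx.Unlock()
        ops := make(map[string]bool, len(sch.uuidOp))
        for uuid := range sch.uuidOp {
            ops[uuid] = true
        }
        return ops
    }

    func main() {
        sch := &scheduler{uuidOp: map[string]string{"zzzzz-dz642-000000000000001": "lock"}}
        pending := sch.pendingOps()
        fmt.Println(pending["zzzzz-dz642-000000000000001"]) // true
    }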
commit 5a36ea60cadf2f041f460eeeea970ff9dac66e71
Author: Tom Clegg <tom at curii.com>
Date: Mon Sep 6 14:47:03 2021 -0400
18102: Fix double-unlock in scheduler.
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>
diff --git a/lib/dispatchcloud/scheduler/run_queue.go b/lib/dispatchcloud/scheduler/run_queue.go
index b9d653a82..d2f6d1c2c 100644
--- a/lib/dispatchcloud/scheduler/run_queue.go
+++ b/lib/dispatchcloud/scheduler/run_queue.go
@@ -66,8 +66,7 @@ tryrun:
// starve this one by keeping
// idle workers alive on different
// instance types.
- logger.Debug("unlocking: AtQuota and no unalloc workers")
- sch.queue.Unlock(ctr.UUID)
+ logger.Debug("overquota")
overquota = sorted[i:]
break tryrun
} else if logger.Info("creating new instance"); sch.pool.Create(it) {
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index fd1d0a870..c8d45cbd1 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -244,15 +244,22 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
starts: []string{},
canCreate: 0,
}
- New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond).runQueue()
+ sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
+ sch.runQueue()
+ sch.sync()
+ sch.runQueue()
+ sch.sync()
c.Check(pool.creates, check.DeepEquals, shouldCreate)
if len(shouldCreate) == 0 {
c.Check(pool.starts, check.DeepEquals, []string{})
- c.Check(pool.shutdowns, check.Not(check.Equals), 0)
} else {
c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})
- c.Check(pool.shutdowns, check.Equals, 0)
}
+ c.Check(pool.shutdowns, check.Equals, 3-quota)
+ c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
+ {UUID: "zzzzz-dz642-000000000000003", From: "Locked", To: "Queued"},
+ {UUID: "zzzzz-dz642-000000000000002", From: "Locked", To: "Queued"},
+ })
}
}
diff --git a/lib/dispatchcloud/test/queue.go b/lib/dispatchcloud/test/queue.go
index 3598ec6da..5973d1639 100644
--- a/lib/dispatchcloud/test/queue.go
+++ b/lib/dispatchcloud/test/queue.go
@@ -26,13 +26,27 @@ type Queue struct {
Logger logrus.FieldLogger
- entries map[string]container.QueueEnt
- updTime time.Time
- subscribers map[<-chan struct{}]chan struct{}
+ entries map[string]container.QueueEnt
+ updTime time.Time
+ subscribers map[<-chan struct{}]chan struct{}
+ stateChanges []QueueStateChange
mtx sync.Mutex
}
+type QueueStateChange struct {
+ UUID string
+ From arvados.ContainerState
+ To arvados.ContainerState
+}
+
+// All calls to Lock/Unlock/Cancel to date.
+func (q *Queue) StateChanges() []QueueStateChange {
+ q.mtx.Lock()
+ defer q.mtx.Unlock()
+ return q.stateChanges
+}
+
// Entries returns the containers that were queued when Update was
// last called.
func (q *Queue) Entries() (map[string]container.QueueEnt, time.Time) {
@@ -111,6 +125,7 @@ func (q *Queue) notify() {
// caller must have lock.
func (q *Queue) changeState(uuid string, from, to arvados.ContainerState) error {
ent := q.entries[uuid]
+ q.stateChanges = append(q.stateChanges, QueueStateChange{uuid, from, to})
if ent.Container.State != from {
return fmt.Errorf("changeState failed: state=%q", ent.Container.State)
}
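Note that changeState() records the attempted transition before validating
the current state, so even a bogus second unlock shows up in StateChanges(),
which is what lets the DeepEquals assertion in TestShutdownAtQuota catch a
recurrence of the double-unlock. An illustrative use inside a test (the UUID
index and states are chosen for the example):

    sch := New(ctx, &queue, &pool, nil, time.Millisecond, time.Millisecond)
    sch.runQueue()
    sch.sync()

    // A regression that unlocks the same container twice would append
    // a second {From: "Locked", To: "Queued"} entry and fail DeepEquals.
    c.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{
        {UUID: test.ContainerUUID(3), From: "Queued", To: "Locked"},
        {UUID: test.ContainerUUID(3), From: "Locked", To: "Queued"},
    })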
-----------------------------------------------------------------------
hooks/post-receive