[arvados] updated: 2.6.0-328-g5372ee787

git repository hosting git@public.arvados.org
Mon Jun 26 23:03:31 UTC 2023


Summary of changes:
 lib/config/config.default.yml                 | 28 +++++++++++++++++++++++----
 lib/dispatchcloud/dispatcher.go               |  5 ++++-
 lib/dispatchcloud/dispatcher_test.go          |  1 +
 lib/dispatchcloud/scheduler/run_queue_test.go | 24 +++++++++++------------
 lib/dispatchcloud/scheduler/scheduler.go      |  8 ++++++--
 lib/dispatchcloud/scheduler/sync_test.go      |  4 ++--
 sdk/go/arvados/config.go                      |  1 +
 7 files changed, 50 insertions(+), 21 deletions(-)

       via  5372ee7878d6880081dd4b5481e1820fc7cd1975 (commit)
      from  c624929279e70d58017eab08ed286dda88bcd215 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.


commit 5372ee7878d6880081dd4b5481e1820fc7cd1975
Author: Tom Clegg <tom@curii.com>
Date:   Mon Jun 26 19:02:49 2023 -0400

    20667: Add InitialQuotaEstimate config.
    
    Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curii.com>

diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 49d62e298..723e64cea 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -1413,10 +1413,30 @@ Clusters:
         # down.
         MaxInstances: 64
 
-        # Maximum fraction of CloudVMs.MaxInstances allowed to run
-        # "supervisor" containers at any given time. A supervisor is a
-        # container whose purpose is mainly to submit and manage other
-        # containers, such as arvados-cwl-runner workflow runner.
+        # The minimum number of instances expected to be runnable
+        # without reaching a provider-imposed quota.
+        #
+        # This is used as the initial value for the dispatcher's
+        # dynamic instance limit, which increases (up to MaxInstances)
+        # as containers start up successfully and decreases in
+        # response to high API load and cloud quota errors.
+        #
+        # Setting this too high creates a risk that the dispatcher
+        # will cause deadlock by starting so many supervisor
+        # containers (based on SupervisorFraction and MaxInstances)
+        # that the cloud quota prevents them from running any child
+        # containers.
+        #
+        # Setting this too low causes the dispatcher to be
+        # unnecessarily slow to start up new instances after a
+        # restart.
+        InitialQuotaEstimate: 16
+
+        # Maximum fraction of available instance capacity allowed to
+        # run "supervisor" containers at any given time. A supervisor
+        # is a container whose purpose is mainly to submit and manage
+        # other containers, such as arvados-cwl-runner workflow
+        # runner.
         #
         # If there is a hard limit on the amount of concurrent
         # containers that the cluster can run, it is important to
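
The new comment block above explains the tradeoff. A minimal sketch of
the arithmetic behind the deadlock warning, assuming (for illustration
only) that the supervisor cap is SupervisorFraction times the current
dynamic limit:

    package main

    import "fmt"

    func main() {
        maxInstances := 64         // configured ceiling
        initialQuotaEstimate := 16 // the dynamic limit now starts here
        supervisorFraction := 0.25

        // Previously the dynamic limit started at MaxInstances, so a
        // freshly restarted dispatcher could immediately run this many
        // supervisors:
        fmt.Println(int(supervisorFraction * float64(maxInstances))) // 16

        // With this commit the limit starts at InitialQuotaEstimate and
        // only grows toward MaxInstances as containers start up
        // successfully:
        fmt.Println(int(supervisorFraction * float64(initialQuotaEstimate))) // 4
    }

If the provider's real quota were 16 instances, starting 16 supervisors
at once would leave no room for their child containers -- the deadlock
the comment warns about.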
diff --git a/lib/dispatchcloud/dispatcher.go b/lib/dispatchcloud/dispatcher.go
index 97cbd8edc..49be9e68a 100644
--- a/lib/dispatchcloud/dispatcher.go
+++ b/lib/dispatchcloud/dispatcher.go
@@ -198,7 +198,10 @@ func (disp *dispatcher) run() {
 	if pollInterval <= 0 {
 		pollInterval = defaultPollInterval
 	}
-	sched := scheduler.New(disp.Context, disp.ArvClient, disp.queue, disp.pool, disp.Registry, staleLockTimeout, pollInterval, disp.Cluster.Containers.CloudVMs.MaxInstances, disp.Cluster.Containers.CloudVMs.SupervisorFraction)
+	sched := scheduler.New(disp.Context, disp.ArvClient, disp.queue, disp.pool, disp.Registry, staleLockTimeout, pollInterval,
+		disp.Cluster.Containers.CloudVMs.InitialQuotaEstimate,
+		disp.Cluster.Containers.CloudVMs.MaxInstances,
+		disp.Cluster.Containers.CloudVMs.SupervisorFraction)
 	sched.Start()
 	defer sched.Stop()
 
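Note that minQuota and maxInstances are adjacent plain ints in the new
signature (see scheduler.go below), so the argument order is easy to
get wrong at a call site; the dispatcher now spells out the last three
arguments one per line:

    func New(ctx context.Context, client *arvados.Client,
        queue ContainerQueue, pool WorkerPool, reg *prometheus.Registry,
        staleLockTimeout, queueUpdateInterval time.Duration,
        minQuota, maxInstances int, supervisorFraction float64) *Scheduler
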
diff --git a/lib/dispatchcloud/dispatcher_test.go b/lib/dispatchcloud/dispatcher_test.go
index 6a8adcad9..17121ffeb 100644
--- a/lib/dispatchcloud/dispatcher_test.go
+++ b/lib/dispatchcloud/dispatcher_test.go
@@ -78,6 +78,7 @@ func (s *DispatcherSuite) SetUpTest(c *check.C) {
 				TimeoutProbe:         arvados.Duration(15 * time.Millisecond),
 				TimeoutShutdown:      arvados.Duration(5 * time.Millisecond),
 				MaxCloudOpsPerSecond: 500,
+				InitialQuotaEstimate: 8,
 				PollInterval:         arvados.Duration(5 * time.Millisecond),
 				ProbeInterval:        arvados.Duration(5 * time.Millisecond),
 				MaxProbesPerSecond:   1000,
diff --git a/lib/dispatchcloud/scheduler/run_queue_test.go b/lib/dispatchcloud/scheduler/run_queue_test.go
index 60917a059..f407ac848 100644
--- a/lib/dispatchcloud/scheduler/run_queue_test.go
+++ b/lib/dispatchcloud/scheduler/run_queue_test.go
@@ -195,7 +195,7 @@ func (*SchedulerSuite) TestUseIdleWorkers(c *check.C) {
 		running:   map[string]time.Time{},
 		canCreate: 0,
 	}
-	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0).runQueue()
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
 	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(1), test.InstanceType(1), test.InstanceType(1)})
 	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})
 	c.Check(pool.running, check.HasLen, 1)
@@ -247,7 +247,7 @@ func (*SchedulerSuite) TestShutdownAtQuota(c *check.C) {
 			starts:    []string{},
 			canCreate: 0,
 		}
-		sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+		sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 		sch.sync()
 		sch.runQueue()
 		sch.sync()
@@ -361,7 +361,7 @@ func (*SchedulerSuite) TestIdleIn503QuietPeriod(c *check.C) {
 		starts:    []string{},
 		canCreate: 0,
 	}
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	sch.last503time = time.Now()
 	sch.maxConcurrency = 3
 	sch.sync()
@@ -416,7 +416,7 @@ func (*SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {
 		starts:    []string{},
 		canCreate: 0,
 	}
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 8, 0.5)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
 	sch.sync()
 	sch.runQueue()
 	sch.sync()
@@ -475,7 +475,7 @@ func (*SchedulerSuite) TestExcessSupervisors(c *check.C) {
 		starts:    []string{},
 		canCreate: 0,
 	}
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 8, 0.5)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 8, 0.5)
 	sch.sync()
 	sch.runQueue()
 	sch.sync()
@@ -526,7 +526,7 @@ func (*SchedulerSuite) TestEqualPriorityContainers(c *check.C) {
 		starts:    []string{},
 		canCreate: 0,
 	}
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	for i := 0; i < 30; i++ {
 		sch.runQueue()
 		sch.sync()
@@ -628,7 +628,7 @@ func (*SchedulerSuite) TestStartWhileCreating(c *check.C) {
 		},
 	}
 	queue.Update()
-	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0).runQueue()
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0).runQueue()
 	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(2), test.InstanceType(1)})
 	c.Check(pool.starts, check.DeepEquals, []string{uuids[6], uuids[5], uuids[3], uuids[2]})
 	running := map[string]bool{}
@@ -672,7 +672,7 @@ func (*SchedulerSuite) TestKillNonexistentContainer(c *check.C) {
 		},
 	}
 	queue.Update()
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	c.Check(pool.running, check.HasLen, 1)
 	sch.sync()
 	for deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {
@@ -705,7 +705,7 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
 	pool := stubPool{
 		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
 	}
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	sch.runQueue()
 	sch.updateMetrics()
 
@@ -717,7 +717,7 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
 	// 'over quota' metric will be 1 because no workers are available and canCreate defaults
 	// to zero.
 	pool = stubPool{}
-	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	sch.runQueue()
 	sch.updateMetrics()
 
@@ -750,7 +750,7 @@ func (*SchedulerSuite) TestContainersMetrics(c *check.C) {
 		unalloc: map[arvados.InstanceType]int{test.InstanceType(1): 1},
 		running: map[string]time.Time{},
 	}
-	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	sch.runQueue()
 	sch.updateMetrics()
 
@@ -824,7 +824,7 @@ func (*SchedulerSuite) TestSkipSupervisors(c *check.C) {
 		running:   map[string]time.Time{},
 		canCreate: 0,
 	}
-	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 10, 0.2).runQueue()
+	New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 10, 0.2).runQueue()
 	c.Check(pool.creates, check.DeepEquals, []arvados.InstanceType(nil))
 	c.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(1)})
 }
diff --git a/lib/dispatchcloud/scheduler/scheduler.go b/lib/dispatchcloud/scheduler/scheduler.go
index 0a5a94c96..1db12279d 100644
--- a/lib/dispatchcloud/scheduler/scheduler.go
+++ b/lib/dispatchcloud/scheduler/scheduler.go
@@ -62,7 +62,7 @@ type Scheduler struct {
 //
 // Any given queue and pool should not be used by more than one
 // scheduler at a time.
-func New(ctx context.Context, client *arvados.Client, queue ContainerQueue, pool WorkerPool, reg *prometheus.Registry, staleLockTimeout, queueUpdateInterval time.Duration, maxInstances int, supervisorFraction float64) *Scheduler {
+func New(ctx context.Context, client *arvados.Client, queue ContainerQueue, pool WorkerPool, reg *prometheus.Registry, staleLockTimeout, queueUpdateInterval time.Duration, minQuota, maxInstances int, supervisorFraction float64) *Scheduler {
 	sch := &Scheduler{
 		logger:              ctxlog.FromContext(ctx),
 		client:              client,
@@ -75,10 +75,14 @@ func New(ctx context.Context, client *arvados.Client, queue ContainerQueue, pool
 		stop:                make(chan struct{}),
 		stopped:             make(chan struct{}),
 		uuidOp:              map[string]string{},
-		maxConcurrency:      maxInstances, // initial value -- will be dynamically adjusted
 		supervisorFraction:  supervisorFraction,
 		maxInstances:        maxInstances,
 	}
+	if minQuota > 0 {
+		sch.maxConcurrency = minQuota
+	} else {
+		sch.maxConcurrency = maxInstances
+	}
 	sch.registerMetrics(reg)
 	return sch
 }
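
The initialization rule added above can be restated as a standalone
helper (a hypothetical name, for illustration only; the commit inlines
this logic in New):

    // initialConcurrency seeds the scheduler's dynamic limit with the
    // operator's quota estimate when one is configured, and otherwise
    // falls back to the configured instance ceiling.
    func initialConcurrency(minQuota, maxInstances int) int {
        if minQuota > 0 {
            return minQuota
        }
        return maxInstances
    }

This also explains the mechanical test updates in this commit: each
call site gains a leading 0 for minQuota, which falls through to
maxInstances and so preserves the previous startup behavior.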
diff --git a/lib/dispatchcloud/scheduler/sync_test.go b/lib/dispatchcloud/scheduler/sync_test.go
index 1fc56cb8e..846bb4fc9 100644
--- a/lib/dispatchcloud/scheduler/sync_test.go
+++ b/lib/dispatchcloud/scheduler/sync_test.go
@@ -48,7 +48,7 @@ func (*SchedulerSuite) TestForgetIrrelevantContainers(c *check.C) {
 	ents, _ := queue.Entries()
 	c.Check(ents, check.HasLen, 1)
 
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 	sch.sync()
 
 	ents, _ = queue.Entries()
@@ -80,7 +80,7 @@ func (*SchedulerSuite) TestCancelOrphanedContainers(c *check.C) {
 	ents, _ := queue.Entries()
 	c.Check(ents, check.HasLen, 1)
 
-	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0)
+	sch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, time.Millisecond, time.Millisecond, 0, 0, 0)
 
 	// Sync shouldn't cancel the container because it might be
 	// running on the VM with state=="unknown".
diff --git a/sdk/go/arvados/config.go b/sdk/go/arvados/config.go
index c49476997..a3e54952d 100644
--- a/sdk/go/arvados/config.go
+++ b/sdk/go/arvados/config.go
@@ -573,6 +573,7 @@ type CloudVMsConfig struct {
 	MaxProbesPerSecond             int
 	MaxConcurrentInstanceCreateOps int
 	MaxInstances                   int
+	InitialQuotaEstimate           int
 	SupervisorFraction             float64
 	PollInterval                   Duration
 	ProbeInterval                  Duration
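
End to end, the new setting flows from the cluster config into the
scheduler's startup limit along this path (all names as they appear in
the diffs above):

    Containers.CloudVMs.InitialQuotaEstimate    (config.default.yml)
      -> CloudVMsConfig.InitialQuotaEstimate    (sdk/go/arvados/config.go)
      -> scheduler.New minQuota argument        (lib/dispatchcloud/dispatcher.go)
      -> sch.maxConcurrency initial value       (lib/dispatchcloud/scheduler/scheduler.go)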

-----------------------------------------------------------------------


hooks/post-receive
-- 