[ARVADOS] created: 1.3.0-547-gf35937526

Git user git at public.curoverse.com
Wed Mar 20 21:34:10 UTC 2019


        at  f35937526207a79013583afea084f8e2bff11502 (commit)


commit f35937526207a79013583afea084f8e2bff11502
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date:   Wed Mar 20 17:33:51 2019 -0400

    13996: Updating API server to use new config object WIP
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>

diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index ba9f7524e..53fc5d9cb 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -329,42 +329,49 @@ Clusters:
           # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
           AssignNodeHostname: compute%<slot_number>d
 
-        JobsAPI:
-          # Enable the legacy Jobs API.
-          # auto -- (default) enable the Jobs API only if it has been used before
-          #         (i.e., there are job records in the database)
-          # true -- enable the Jobs API despite lack of existing records.
-          # false -- disable the Jobs API despite presence of existing records.
-          Enable: auto
-
-          # Git repositories must be readable by api server, or you won't be
-          # able to submit crunch jobs. To pass the test suites, put a clone
-          # of the arvados tree in {git_repositories_dir}/arvados.git or
-          # {git_repositories_dir}/arvados/.git
-          GitInternalDir: /var/lib/arvados/internal.git
-
-          # Docker image to be used when none found in runtime_constraints of a job
-          DefaultDockerImage: ""
-
-          # :none or :slurm_immediate
-          CrunchJobWrapper: :none
-
-          # username, or false = do not set uid when running jobs.
-          CrunchJobUser: crunch
-
-          # The web service must be able to create/write this file, and
-          # crunch-job must be able to stat() it.
-          CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
-
-          # Control job reuse behavior when two completed jobs match the
-          # search criteria and have different outputs.
-          #
-          # If true, in case of a conflict, reuse the earliest job (this is
-          # similar to container reuse behavior).
-          #
-          # If false, in case of a conflict, do not reuse any completed job,
-          # but do reuse an already-running job if available (this is the
-          # original job reuse behavior, and is still the default).
-          ReuseJobIfOutputsDiffer: false
-
-      Mail: {}
+      JobsAPI:
+        # Enable the legacy Jobs API.
+        # auto -- (default) enable the Jobs API only if it has been used before
+        #         (i.e., there are job records in the database)
+        # true -- enable the Jobs API despite lack of existing records.
+        # false -- disable the Jobs API despite presence of existing records.
+        Enable: auto
+
+        # Git repositories must be readable by api server, or you won't be
+        # able to submit crunch jobs. To pass the test suites, put a clone
+        # of the arvados tree in {git_repositories_dir}/arvados.git or
+        # {git_repositories_dir}/arvados/.git
+        GitInternalDir: /var/lib/arvados/internal.git
+
+        # Docker image to be used when none found in runtime_constraints of a job
+        DefaultDockerImage: ""
+
+        # :none or :slurm_immediate
+        CrunchJobWrapper: :none
+
+        # username, or false = do not set uid when running jobs.
+        CrunchJobUser: crunch
+
+        # The web service must be able to create/write this file, and
+        # crunch-job must be able to stat() it.
+        CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+        # Control job reuse behavior when two completed jobs match the
+        # search criteria and have different outputs.
+        #
+        # If true, in case of a conflict, reuse the earliest job (this is
+        # similar to container reuse behavior).
+        #
+        # If false, in case of a conflict, do not reuse any completed job,
+        # but do reuse an already-running job if available (this is the
+        # original job reuse behavior, and is still the default).
+        ReuseJobIfOutputsDiffer: false
+
+      Mail:
+        MailchimpAPIKey:            # api-server/mailchimp_api_key
+        MailchimpListID:            # api-server/mailchimp_list_id
+        SendUserSetupNotificationEmail:  # workbench/send_user_setup_notification_email
+        IssueReporterEmailFrom:     # workbench/issue_reporter_email_from
+        IssueReporterEmailTo:       # workbench/issue_reporter_email_to
+        SupportEmailAddress:        # workbench/support_email_address
+        EmailFrom:                  # workbench/email_from
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index 6dbba1a24..7d6e697ca 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -306,7 +306,7 @@ class ApplicationController < ActionController::Base
       limit_query.each do |record|
         new_limit += 1
         read_total += record.read_length.to_i
-        if read_total >= Rails.configuration.max_index_database_read
+        if read_total >= Rails.configuration.API["MaxIndexDatabaseRead"]
           new_limit -= 1 if new_limit > 1
           @limit = new_limit
           break
@@ -417,7 +417,7 @@ class ApplicationController < ActionController::Base
   end
 
   def disable_api_methods
-    if Rails.configuration.disable_api_methods.
+    if Rails.configuration.API["DisabledAPIs"].
         include?(controller_name + "." + action_name)
       send_error("Disabled", status: 404)
     end
diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
index 6163f893c..755d8d705 100644
--- a/services/api/app/controllers/arvados/v1/groups_controller.rb
+++ b/services/api/app/controllers/arvados/v1/groups_controller.rb
@@ -191,7 +191,7 @@ class Arvados::V1::GroupsController < ApplicationController
 
     table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
 
-    disabled_methods = Rails.configuration.disable_api_methods
+    disabled_methods = Rails.configuration.API["DisabledAPIs"]
     avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')}
     klasses = avail_klasses.keys
 
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index 771ef2b1f..6a658561d 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -39,7 +39,7 @@ class Arvados::V1::SchemaController < ApplicationController
         title: "Arvados API",
         description: "The API to interact with Arvados.",
         documentationLink: "http://doc.arvados.org/api/index.html",
-        defaultCollectionReplication: Rails.configuration.default_collection_replication,
+        defaultCollectionReplication: Rails.configuration.Collections["DefaultReplication"],
         protocol: "rest",
         baseUrl: root_url + "arvados/v1/",
         basePath: "/arvados/v1/",
@@ -70,7 +70,7 @@ class Arvados::V1::SchemaController < ApplicationController
                 when false
                   ''
                 when true
-                  'https://git.%s.arvadosapi.com/' % Rails.configuration.uuid_prefix
+                  'https://git.%s.arvadosapi.com/' % Rails.configuration.ClusterID
                 else
                   Rails.application.config.git_repo_https_base
                 end,
@@ -397,7 +397,7 @@ class Arvados::V1::SchemaController < ApplicationController
           end
         end
       end
-      Rails.configuration.disable_api_methods.each do |method|
+      Rails.configuration.API["DisabledAPIs"].each do |method|
         ctrl, action = method.split('.', 2)
         discovery[:resources][ctrl][:methods].delete(action.to_sym)
       end
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
index 1889d74ea..c03f5b692 100644
--- a/services/api/app/controllers/user_sessions_controller.rb
+++ b/services/api/app/controllers/user_sessions_controller.rb
@@ -52,7 +52,7 @@ class UserSessionsController < ApplicationController
                       :first_name => omniauth['info']['first_name'],
                       :last_name => omniauth['info']['last_name'],
                       :identity_url => omniauth['info']['identity_url'],
-                      :is_active => Rails.configuration.new_users_are_active,
+                      :is_active => Rails.configuration.Users["NewUsersAreActive"],
                       :owner_uuid => system_user_uuid)
       if omniauth['info']['username']
         user.set_initial_username(requested: omniauth['info']['username'])
diff --git a/services/api/app/mailers/admin_notifier.rb b/services/api/app/mailers/admin_notifier.rb
index 87a5699f4..e454d64e0 100644
--- a/services/api/app/mailers/admin_notifier.rb
+++ b/services/api/app/mailers/admin_notifier.rb
@@ -5,32 +5,32 @@
 class AdminNotifier < ActionMailer::Base
   include AbstractController::Callbacks
 
-  default from: Rails.configuration.admin_notifier_email_from
+  default from: Rails.configuration.Users["AdminNotifierEmailFrom"]
 
   def new_user(user)
     @user = user
-    if not Rails.configuration.new_user_notification_recipients.empty? then
-      @recipients = Rails.configuration.new_user_notification_recipients
+    if not Rails.configuration.Users["NewUserNotificationRecipients"].empty? then
+      @recipients = Rails.configuration.Users["NewUserNotificationRecipients"]
       logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
 
       add_to_subject = ''
-      if Rails.configuration.auto_setup_new_users
+      if Rails.configuration.Users["AutoSetupNewUsers"]
         add_to_subject = @user.is_invited ? ' and setup' : ', but not setup'
       end
 
       mail(to: @recipients,
-           subject: "#{Rails.configuration.email_subject_prefix}New user created#{add_to_subject} notification"
+           subject: "#{Rails.configuration.Users["EmailSubjectPrefix"]}New user created#{add_to_subject} notification"
           )
     end
   end
 
   def new_inactive_user(user)
     @user = user
-    if not Rails.configuration.new_inactive_user_notification_recipients.empty? then
-      @recipients = Rails.configuration.new_inactive_user_notification_recipients
+    if not Rails.configuration.Users["NewInactiveUserNotificationRecipients"].empty? then
+      @recipients = Rails.configuration.Users["NewInactiveUserNotificationRecipients"]
       logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
       mail(to: @recipients,
-           subject: "#{Rails.configuration.email_subject_prefix}New inactive user notification"
+           subject: "#{Rails.configuration.Users["EmailSubjectPrefix"]}New inactive user notification"
           )
     end
   end
diff --git a/services/api/app/mailers/profile_notifier.rb b/services/api/app/mailers/profile_notifier.rb
index 8c0c5ec86..a23d5f345 100644
--- a/services/api/app/mailers/profile_notifier.rb
+++ b/services/api/app/mailers/profile_notifier.rb
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: AGPL-3.0
 
 class ProfileNotifier < ActionMailer::Base
-  default from: Rails.configuration.admin_notifier_email_from
+  default from: Rails.configuration.Users["AdminNotifierEmailFrom"]
 
   def profile_created(user, address)
     @user = user
diff --git a/services/api/app/mailers/user_notifier.rb b/services/api/app/mailers/user_notifier.rb
index 5fb7036bf..dbde7a973 100644
--- a/services/api/app/mailers/user_notifier.rb
+++ b/services/api/app/mailers/user_notifier.rb
@@ -5,7 +5,7 @@
 class UserNotifier < ActionMailer::Base
   include AbstractController::Callbacks
 
-  default from: Rails.configuration.user_notifier_email_from
+  default from: Rails.configuration.Users["UserNotifierEmailFrom"]
 
   def account_is_setup(user)
     @user = user
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
index 2002e90ac..2af97bc90 100644
--- a/services/api/app/models/arvados_model.rb
+++ b/services/api/app/models/arvados_model.rb
@@ -393,7 +393,7 @@ class ArvadosModel < ActiveRecord::Base
   end
 
   def logged_attributes
-    attributes.except(*Rails.configuration.unlogged_attributes)
+    attributes.except(*Rails.configuration.AuditLogs["UnloggedAttributes"])
   end
 
   def self.full_text_searchable_columns
@@ -715,7 +715,7 @@ class ArvadosModel < ActiveRecord::Base
   end
 
   def self.uuid_like_pattern
-    "#{Rails.configuration.uuid_prefix}-#{uuid_prefix}-_______________"
+    "#{Rails.configuration.ClusterID}-#{uuid_prefix}-_______________"
   end
 
   def self.uuid_regex
@@ -794,8 +794,8 @@ class ArvadosModel < ActiveRecord::Base
   end
 
   def is_audit_logging_enabled?
-    return !(Rails.configuration.max_audit_log_age.to_i == 0 &&
-             Rails.configuration.max_audit_log_delete_batch.to_i > 0)
+    return !(Rails.configuration.AuditLogs["MaxAge"].to_i == 0 &&
+             Rails.configuration.AuditLogs["MaxDeleteBatch"].to_i > 0)
   end
 
   def log_start_state
diff --git a/services/api/app/models/blob.rb b/services/api/app/models/blob.rb
index 55a257856..500a66279 100644
--- a/services/api/app/models/blob.rb
+++ b/services/api/app/models/blob.rb
@@ -51,15 +51,15 @@ class Blob
       timestamp = opts[:expire]
     else
       timestamp = db_current_time.to_i +
-        (opts[:ttl] || Rails.configuration.blob_signature_ttl)
+        (opts[:ttl] || Rails.configuration.Collections["BlobSigningTTL"])
     end
     timestamp_hex = timestamp.to_s(16)
     # => "53163cb4"
-    blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+    blob_signature_ttl = Rails.configuration.Collections["BlobSigningTTL"].to_s(16)
 
     # Generate a signature.
     signature =
-      generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+      generate_signature((opts[:key] or Rails.configuration.Collections["BlobSigningKey"]),
                          blob_hash, opts[:api_token], timestamp_hex, blob_signature_ttl)
 
     blob_locator + '+A' + signature + '@' + timestamp_hex
@@ -103,10 +103,10 @@ class Blob
     if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)
       raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
     end
-    blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+    blob_signature_ttl = Rails.configuration.Collections["BlobSigningTTL"].to_s(16)
 
     my_signature =
-      generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+      generate_signature((opts[:key] or Rails.configuration.Collections["BlobSigningKey"]),
                          blob_hash, opts[:api_token], timestamp, blob_signature_ttl)
 
     if my_signature != given_signature
diff --git a/services/api/app/models/commit.rb b/services/api/app/models/commit.rb
index 921c690cd..ed3c5cd35 100644
--- a/services/api/app/models/commit.rb
+++ b/services/api/app/models/commit.rb
@@ -148,7 +148,7 @@ class Commit < ActiveRecord::Base
     unless src_gitdir
       raise ArgumentError.new "no local repository for #{repo_name}"
     end
-    dst_gitdir = Rails.configuration.git_internal_dir
+    dst_gitdir = Rails.configuration.Containers["JobsAPI"]["GitInternalDir"]
 
     begin
       commit_in_dst = must_git(dst_gitdir, "log -n1 --format=%H #{sha1.shellescape}^{commit}").strip
diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb
index 7508ead5d..05f7d7272 100644
--- a/services/api/app/models/job.rb
+++ b/services/api/app/models/job.rb
@@ -284,7 +284,7 @@ class Job < ArvadosModel
         log_reuse_info { "job #{j.uuid} has nil output" }
       elsif j.log.nil?
         log_reuse_info { "job #{j.uuid} has nil log" }
-      elsif Rails.configuration.reuse_job_if_outputs_differ
+      elsif Rails.configuration.Containers["JobsAPI"]["ReuseJobIfOutputsDiffer"]
         if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
           # Ignore: keep looking for an incomplete job or one whose
           # output is readable.
@@ -490,7 +490,7 @@ class Job < ArvadosModel
   def find_docker_image_locator
     if runtime_constraints.is_a? Hash
       runtime_constraints['docker_image'] ||=
-        Rails.configuration.default_docker_image_for_jobs
+        Rails.configuration.Containers["JobsAPI"]["DefaultDockerImage"]
     end
 
     resolve_runtime_constraint("docker_image",
@@ -566,7 +566,7 @@ class Job < ArvadosModel
 
   def trigger_crunch_dispatch_if_cancelled
     if @need_crunch_dispatch_trigger
-      File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
+      File.open(Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"], 'wb') do
         # That's all, just create/touch a file for crunch-job to see.
       end
     end
diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb
index 48655156c..bde9d51d2 100644
--- a/services/api/app/models/repository.rb
+++ b/services/api/app/models/repository.rb
@@ -49,7 +49,7 @@ class Repository < ArvadosModel
     # prefers bare repositories over checkouts.
     [["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args|
       [:uuid, :name].each do |path_attr|
-        git_dir = File.join(Rails.configuration.git_repositories_dir,
+        git_dir = File.join(Rails.configuration.Containers["Git"]["Repositories"],
                             repo_base % send(path_attr), *join_args)
         return git_dir if File.exist?(git_dir)
       end
@@ -108,8 +108,8 @@ class Repository < ArvadosModel
   def _clone_url config_var, default_base_fmt
     configured_base = Rails.configuration.send config_var
     return nil if configured_base == false
-    prefix = new_record? ? Rails.configuration.uuid_prefix : uuid[0,5]
-    if prefix == Rails.configuration.uuid_prefix and configured_base != true
+    prefix = new_record? ? Rails.configuration.ClusterID : uuid[0,5]
+    if prefix == Rails.configuration.ClusterID and configured_base != true
       base = configured_base
     else
       base = default_base_fmt % prefix
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 8ed97e6b1..137b0e093 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -34,7 +34,7 @@ class User < ArvadosModel
   after_create :add_system_group_permission_link
   after_create :invalidate_permissions_cache
   after_create :auto_setup_new_user, :if => Proc.new { |user|
-    Rails.configuration.auto_setup_new_users and
+    Rails.configuration.Users["AutoSetupNewUsers"] and
     (user.uuid != system_user_uuid) and
     (user.uuid != anonymous_user_uuid)
   }
@@ -81,7 +81,7 @@ class User < ArvadosModel
 
   def is_invited
     !!(self.is_active ||
-       Rails.configuration.new_users_are_active ||
+       Rails.configuration.Users["NewUsersAreActive"] ||
        self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)
   end
 
@@ -358,15 +358,15 @@ class User < ArvadosModel
     current_user.andand.is_admin or
       (self == current_user &&
        self.redirect_to_user_uuid.nil? &&
-       self.is_active == Rails.configuration.new_users_are_active)
+       self.is_active == Rails.configuration.Users["NewUsersAreActive"])
   end
 
   def check_auto_admin
     return if self.uuid.end_with?('anonymouspublic')
     if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and
-        Rails.configuration.auto_admin_user and self.email == Rails.configuration.auto_admin_user) or
+        Rails.configuration.Users["AutoAdminUserWithEmail"] and self.email == Rails.configuration.Users["AutoAdminUserWithEmail"]) or
        (User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and
-        Rails.configuration.auto_admin_first_user)
+        Rails.configuration.Users["AutoAdminFirstUser"])
       self.is_admin = true
       self.is_active = true
     end
@@ -381,7 +381,7 @@ class User < ArvadosModel
     quoted_name = self.class.connection.quote_string(basename)
     next_username = basename
     next_suffix = 1
-    while Rails.configuration.auto_setup_name_blacklist.include?(next_username)
+    while Rails.configuration.Users["AutoSetupUsernameBlacklist"].include?(next_username)
       next_suffix += 1
       next_username = "%s%i" % [basename, next_suffix]
     end
@@ -563,10 +563,10 @@ class User < ArvadosModel
   def auto_setup_new_user
     setup(openid_prefix: Rails.configuration.default_openid_prefix)
     if username
-      create_vm_login_permission_link(Rails.configuration.auto_setup_new_users_with_vm_uuid,
+      create_vm_login_permission_link(Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"],
                                       username)
       repo_name = "#{username}/#{username}"
-      if Rails.configuration.auto_setup_new_users_with_repository and
+      if Rails.configuration.Users["AutoSetupNewUsersWithRepository"] and
           Repository.where(name: repo_name).first.nil?
         repo = Repository.create!(name: repo_name, owner_uuid: uuid)
         Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
@@ -579,7 +579,7 @@ class User < ArvadosModel
   def send_profile_created_notification
     if self.prefs_changed?
       if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
-        profile_notification_address = Rails.configuration.user_profile_notification_address
+        profile_notification_address = Rails.configuration.Users["UserProfileNotificationAddress"]
         ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address
       end
     end
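
For reference, a minimal standalone sketch (not part of the patch) of the
access-pattern change applied throughout this commit: flat snake_case
settings become nested section hashes keyed the same way as
config.defaults.yml. The OpenStruct stand-in and the sample values below
are illustrative assumptions only, not the real Rails.configuration object.

    require 'ostruct'

    # Stand-in for Rails.configuration after the new loader has run; the
    # real object is populated from lib/config/config.defaults.yml plus
    # the site configuration.
    configuration = OpenStruct.new(
      "ClusterID" => "xxxxx",
      "Users"     => { "NewUsersAreActive" => false },
      "API"       => { "DisabledAPIs" => ["jobs.create"] }
    )

    # Old style (removed by this commit):
    #   configuration.new_users_are_active
    #   configuration.disable_api_methods
    #   configuration.uuid_prefix
    # New style (introduced by this commit):
    puts configuration.Users["NewUsersAreActive"].inspect   # => false
    puts configuration.API["DisabledAPIs"].inspect          # => ["jobs.create"]
    puts configuration.ClusterID                            # => xxxxx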

commit 5644e5c5c86a1be726c214a5db08dbb11a2de305
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date:   Tue Mar 19 17:50:18 2019 -0400

    13996: Migrating defaults to new config structure
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>

diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
new file mode 100644
index 000000000..ba9f7524e
--- /dev/null
+++ b/lib/config/config.defaults.yml
@@ -0,0 +1,370 @@
+#
+#
+Clusters:
+  xxxxx:
+    SystemRootToken: ""
+
+    # Token to be included in all healthcheck requests. Disabled by default.
+    # Server expects request header of the format "Authorization: Bearer xxx"
+    ManagementToken: ""
+
+    API:
+      # Maximum size (in bytes) allowed for a single API request.  This
+      # limit is published in the discovery document for use by clients.
+      # Note: You must separately configure the upstream web server or
+      # proxy to actually enforce the desired maximum request size on the
+      # server side.
+      MaxRequestSize: 134217728
+
+      # Limit the number of bytes read from the database during an index
+      # request (by retrieving and returning fewer rows than would
+      # normally be returned in a single response).
+      # Note 1: This setting never reduces the number of returned rows to
+      # zero, no matter how big the first data row is.
+      # Note 2: Currently, this is only checked against a specific set of
+      # columns that tend to get large (collections.manifest_text,
+      # containers.mounts, workflows.definition). Other fields (e.g.,
+      # "properties" hashes) are not counted against this limit.
+      MaxIndexDatabaseRead: 134217728
+
+      # Maximum number of items to return when responding to APIs that
+      # can return partial result sets using limit and offset parameters
+      # (e.g., *.index, groups.contents). If a request specifies a "limit"
+      # parameter higher than this value, this value is used instead.
+      MaxItemsPerResponse: 1000
+
+      # API methods to disable. Disabled methods are not listed in the
+      # discovery document, and respond 404 to all requests.
+      # Example: ["jobs.create", "pipeline_instances.create"]
+      DisabledAPIs: []
+
+    Users:
+      # Config parameters to automatically set up new users.  If enabled,
+      # these users will be able to self-activate.  Enable this if you want
+      # to run an open instance where anyone can create an account and use
+      # the system without requiring manual approval.
+      #
+      # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
+      # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
+      AutoSetupNewUsers: false
+      AutoSetupNewUsersWithVmUUID: ""
+      AutoSetupNewUsersWithRepository: false
+      AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+      # When new_users_are_active is set to true, new users will be active
+      # immediately.  This skips the "self-activate" step which enforces
+      # user agreements.  Should only be enabled for development.
+      NewUsersAreActive: false
+
+      # The e-mail address of the user you would like to become marked as an admin
+      # user on their first login.
+      # In the default configuration, authentication happens through the Arvados SSO
+      # server, which uses OAuth2 against Google's servers, so in that case this
+      # should be an address associated with a Google account.
+      AutoAdminUserWithEmail: ""
+
+      # If auto_admin_first_user is set to true, the first user to log in when no
+      # other admin users exist will automatically become an admin user.
+      AutoAdminFirstUser: false
+
+      # Email address to notify whenever a user creates a profile for the
+      # first time
+      UserProfileNotificationAddress: ""
+      AdminNotifierEmailFrom: arvados at example.com
+      EmailSubjectPrefix: "[ARVADOS] "
+      UserNotifierEmailFrom: arvados at example.com
+      NewUserNotificationRecipients: []
+      NewInactiveUserNotificationRecipients: []
+
+    AuditLogs:
+      # Time to keep audit logs, in seconds. (An audit log is a row added
+      # to the "logs" table in the PostgreSQL database each time an
+      # Arvados object is created, modified, or deleted.)
+      #
+      # Currently, websocket event notifications rely on audit logs, so
+      # this should not be set lower than 600 (10 minutes).
+      MaxAge: 1209600
+
+      # Maximum number of log rows to delete in a single SQL transaction.
+      #
+      # If max_audit_log_delete_batch is 0, log entries will never be
+      # deleted by Arvados. Cleanup can be done by an external process
+      # without affecting any Arvados system processes, as long as very
+      # recent (<5 minutes old) logs are not deleted.
+      #
+      # 100000 is a reasonable batch size for most sites.
+      MaxDeleteBatch: 0
+
+      # Attributes to suppress in events and audit logs.  Notably,
+      # specifying ["manifest_text"] here typically makes the database
+      # smaller and faster.
+      #
+      # Warning: Using any non-empty value here can have undesirable side
+      # effects for any client or component that relies on event logs.
+      # Use at your own risk.
+      UnloggedAttributes: []
+
+    SystemLogs:
+      # Maximum characters of (JSON-encoded) query parameters to include
+      # in each request log entry. When params exceed this size, they will
+      # be JSON-encoded, truncated to this size, and logged as
+      # params_truncated.
+      MaxRequestLogParamsSize: 2000
+
+    Collections:
+      # Allow clients to create collections by providing a manifest with
+      # unsigned data blob locators. IMPORTANT: This effectively disables
+      # access controls for data stored in Keep: a client who knows a hash
+      # can write a manifest that references the hash, pass it to
+      # collections.create (which will create a permission link), use
+      # collections.get to obtain a signature for that data locator, and
+      # use that signed locator to retrieve the data from Keep. Therefore,
+      # do not turn this on if your users expect to keep data private from
+      # one another!
+      BlobSigning: true
+
+      # blob_signing_key is a string of alphanumeric characters used to
+      # generate permission signatures for Keep locators. It must be
+      # identical to the permission key given to Keep. IMPORTANT: This is
+      # a site secret. It should be at least 50 characters.
+      #
+      # Modifying blob_signing_key will invalidate all existing
+      # signatures, which can cause programs to fail (e.g., arv-put,
+      # arv-get, and Crunch jobs).  To avoid errors, rotate keys only when
+      # no such processes are running.
+      BlobSigningKey: ""
+
+      # Default replication level for collections. This is used when a
+      # collection's replication_desired attribute is nil.
+      DefaultReplication: 2
+
+      # Lifetime (in seconds) of blob permission signatures generated by
+      # the API server. This determines how long a client can take (after
+      # retrieving a collection record) to retrieve the collection data
+      # from Keep. If the client needs more time than that (assuming the
+      # collection still has the same content and the relevant user/token
+      # still has permission) the client can retrieve the collection again
+      # to get fresh signatures.
+      #
+      # This must be exactly equal to the -blob-signature-ttl flag used by
+      # keepstore servers.  Otherwise, reading data blocks and saving
+      # collections will fail with HTTP 403 permission errors.
+      #
+      # Modifying blob_signature_ttl invalidates existing signatures; see
+      # blob_signing_key note above.
+      #
+      # The default is 2 weeks.
+      BlobSigningTTL: 1209600
+
+      # Default lifetime for ephemeral collections: 2 weeks. This must not
+      # be less than blob_signature_ttl.
+      DefaultTrashLifetime: 1209600
+
+      # Interval (seconds) between trash sweeps. During a trash sweep,
+      # collections are marked as trash if their trash_at time has
+      # arrived, and deleted if their delete_at time has arrived.
+      TrashSweepInterval: 60
+
+      # Interval (seconds) between asynchronous permission view updates. Any
+      # permission-updating API called with the 'async' parameter schedules an
+      # update on the permission view in the future, if not already scheduled.
+      AsyncPermissionsUpdateInterval: 20
+
+      # If true, enable collection versioning.
+      # When a collection's preserve_version field is true or the current version
+      # is older than the number of seconds defined by preserve_version_if_idle,
+      # a snapshot of the collection's previous state is created and linked to
+      # the current collection.
+      CollectionVersioning: false
+
+      #   0 = auto-create a new version on every update.
+      #  -1 = never auto-create new versions.
+      # > 0 = auto-create a new version when older than the specified number of seconds.
+      PreserveVersionIfIdle: -1
+
+    Login:
+      # These settings are provided by your OAuth2 provider (e.g.,
+      # sso-provider).
+      ProviderAppSecret: ""
+      ProviderAppID: ""
+
+    Git:
+      # Git repositories must be readable by api server, or you won't be
+      # able to submit crunch jobs. To pass the test suites, put a clone
+      # of the arvados tree in {git_repositories_dir}/arvados.git or
+      # {git_repositories_dir}/arvados/.git
+      Repositories: /var/lib/arvados/git/repositories
+
+    Containers:
+      # List of supported Docker Registry image formats that compute nodes
+      # are able to use. `arv keep docker` will error out if a user tries
+      # to store an image with an unsupported format. Use an empty array
+      # to skip the compatibility check (and display a warning message to
+      # that effect).
+      #
+      # Example for sites running docker < 1.10: ["v1"]
+      # Example for sites running docker >= 1.10: ["v2"]
+      # Example for disabling check: []
+      SupportedDockerImageFormats: ["v2"]
+
+      # Include details about job reuse decisions in the server log. This
+      # causes additional database queries to run, so it should not be
+      # enabled unless you expect to examine the resulting logs for
+      # troubleshooting purposes.
+      LogReuseDecisions: false
+
+      # Default value for keep_cache_ram of a container's runtime_constraints.
+      DefaultKeepCacheRAM: 268435456
+
+      # Number of times a container can be unlocked before being
+      # automatically cancelled.
+      MaxDispatchAttempts: 5
+
+      # Default value for container_count_max for container requests.  This is the
+      # number of times Arvados will create a new container to satisfy a container
+      # request.  If a container is cancelled it will retry a new container if
+      # container_count < container_count_max on any container requests associated
+      # with the cancelled container.
+      MaxRetryAttempts: 3
+
+      # The maximum number of compute nodes that can be in use simultaneously.
+      # If this limit is reduced, any existing nodes with slot number >= new limit
+      # will not be counted against the new limit. In other words, the new limit
+      # won't be strictly enforced until those nodes with higher slot numbers
+      # go down.
+      MaxComputeVMs: 64
+
+      # Preemptible instance support (e.g. AWS Spot Instances).
+      # When true, child containers will get created with the preemptible
+      # scheduling parameter set.
+      UsePreemptibleInstances: false
+
+      Logging:
+        # When you run the db:delete_old_container_logs task, it will find
+        # containers that have been finished for at least this many seconds,
+        # and delete their stdout, stderr, arv-mount, crunch-run, and
+        # crunchstat logs from the logs table.
+        MaxAge: 30d
+
+        # These two settings control how frequently log events are flushed to the
+        # database.  Log lines are buffered until either crunch_log_bytes_per_event
+        # has been reached or crunch_log_seconds_between_events has elapsed since
+        # the last flush.
+        LogBytesPerEvent: 4096
+        LogSecondsBetweenEvents: 1
+
+        # The sample period for throttling logs, in seconds.
+        LogThrottlePeriod: 60
+
+        # Maximum number of bytes that a job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleBytes: 65536
+
+        # Maximum number of lines that a job can log over crunch_log_throttle_period
+        # before being silenced until the end of the period.
+        LogThrottleLines: 1024
+
+        # Maximum bytes that may be logged by a single job.  Log bytes that are
+        # silenced by throttling are not counted against this total.
+        LimitLogBytesPerJob: 67108864
+
+        LogPartialLineThrottlePeriod: 5
+
+        # Container logs are written to Keep and saved in a collection,
+        # which is updated periodically while the container runs.  This
+        # value sets the interval (given in seconds) between collection
+        # updates.
+        LogUpdatePeriod: 1800
+
+        # The log collection is also updated when the specified amount of
+        # log data (given in bytes) is produced in less than one update
+        # period.
+        LogUpdateSize: 33554432
+
+      SLURM:
+        Managed:
+          # Path to dns server configuration directory
+          # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+          # files or touch restart.txt (see below).
+          DNSServerConfDir: ""
+
+          # Template file for the dns server host snippets. See
+          # unbound.template in this directory for an example. If false, do
+          # not write any config files.
+          DNSServerConfTemplate: ""
+
+          # String to write to {dns_server_conf_dir}/restart.txt (with a
+          # trailing newline) after updating local data. If false, do not
+          # open or write the restart.txt file.
+          DNSServerReloadCommand: ""
+
+          # Command to run after each DNS update. Template variables will be
+          # substituted; see the "unbound" example below. If false, do not run
+          # a command.
+          DNSServerUpdateCommand: ""
+
+          ComputeNodeDomain: ""
+          ComputeNodeNameservers:
+            - 192.168.1.1
+
+          # Hostname to assign to a compute node when it sends a "ping" and the
+          # hostname in its Node record is nil.
+          # During bootstrapping, the "ping" script is expected to notice the
+          # hostname given in the ping response, and update its unix hostname
+          # accordingly.
+          # If false, leave the hostname alone (this is appropriate if your compute
+          # nodes' hostnames are already assigned by some other mechanism).
+          #
+          # One way or another, the hostnames of your node records should agree
+          # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+          #
+          # Example for compute0000, compute0001, ....:
+          # assign_node_hostname: compute%<slot_number>04d
+          # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+          AssignNodeHostname: compute%<slot_number>d
+
+        JobsAPI:
+          # Enable the legacy Jobs API.
+          # auto -- (default) enable the Jobs API only if it has been used before
+          #         (i.e., there are job records in the database)
+          # true -- enable the Jobs API despite lack of existing records.
+          # false -- disable the Jobs API despite presence of existing records.
+          Enable: auto
+
+          # Git repositories must be readable by api server, or you won't be
+          # able to submit crunch jobs. To pass the test suites, put a clone
+          # of the arvados tree in {git_repositories_dir}/arvados.git or
+          # {git_repositories_dir}/arvados/.git
+          GitInternalDir: /var/lib/arvados/internal.git
+
+          # Docker image to be used when none found in runtime_constraints of a job
+          DefaultDockerImage: ""
+
+          # :none or :slurm_immediate
+          CrunchJobWrapper: :none
+
+          # username, or false = do not set uid when running jobs.
+          CrunchJobUser: crunch
+
+          # The web service must be able to create/write this file, and
+          # crunch-job must be able to stat() it.
+          CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+          # Control job reuse behavior when two completed jobs match the
+          # search criteria and have different outputs.
+          #
+          # If true, in case of a conflict, reuse the earliest job (this is
+          # similar to container reuse behavior).
+          #
+          # If false, in case of a conflict, do not reuse any completed job,
+          # but do reuse an already-running job if available (this is the
+          # original job reuse behavior, and is still the default).
+          ReuseJobIfOutputsDiffer: false
+
+      Mail: {}
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index fc091e375..f52e50089 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -46,6 +46,7 @@ config_key_map =
    "max_request_size":                 "API.MaxRequestSize",
    "max_index_database_read":          "API.MaxIndexDatabaseRead",
    "max_items_per_response":           "API.MaxItemsPerResponse",
+   "async_permissions_update_interval":         "API.AsyncPermissionsUpdateInterval",
    "auto_setup_new_users":                      "Users.AutoSetupNewUsers",
    "auto_setup_new_users_with_vm_uuid":         "Users.AutoSetupNewUsersWithVmUUID",
    "auto_setup_new_users_with_repository":      "Users.AutoSetupNewUsersWithRepository",
@@ -72,7 +73,7 @@ config_key_map =
    "trash_sweep_interval":                      "Collections.TrashSweepInterval",
    "blob_signing_key":                          "Collections.BlobSigningKey",
    "blob_signature_ttl":                        "Collections.BlobSigningTTL",
-   "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning",
+   "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning", # XXX
    "docker_image_formats":             "Containers.SupportedDockerImageFormats",
    "log_reuse_decisions":              "Containers.LogReuseDecisions",
    "container_default_keep_cache_ram": "Containers.DefaultKeepCacheRAM",
@@ -97,6 +98,7 @@ config_key_map =
    "compute_node_domain":                     "Containers.SLURM.Managed.ComputeNodeDomain",
    "compute_node_nameservers":                "Containers.SLURM.Managed.ComputeNodeNameservers",
    "assign_node_hostname":                    "Containers.SLURM.Managed.AssignNodeHostname",
+   "enable_legacy_jobs_api":                  "Containers.JobsAPI.Enable",
    "crunch_job_wrapper":                      "Containers.JobsAPI.CrunchJobWrapper",
    "crunch_job_user":                         "Containers.JobsAPI.CrunchJobUser",
    "crunch_refresh_trigger":                  "Containers.JobsAPI.CrunchRefreshTrigger",
@@ -141,6 +143,8 @@ application_config.each do |k, v|
   end
 end
 
+puts $arvados_config.to_yaml
+
 Server::Application.configure do
   nils = []
   $arvados_config.each do |k, v|
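
The hunk above only adds entries to config_key_map; the loop that applies
the map is in the next diff below (commit e6386e3's load_config.rb). A
simplified standalone sketch of that legacy-key migration, using string
keys instead of the initializer's symbols and made-up sample values:

    require 'yaml'

    # Nested defaults, as loaded from config.defaults.yml (subset).
    arvados_config = {
      "Users" => { "NewUsersAreActive" => false },
      "API"   => { "MaxItemsPerResponse" => 1000 }
    }

    # Legacy flat key -> dotted path into the nested config.
    config_key_map = {
      "new_users_are_active"   => "Users.NewUsersAreActive",
      "max_items_per_response" => "API.MaxItemsPerResponse"
    }

    # Settings read from the old application.yml (sample values).
    legacy_application_config = {
      "new_users_are_active"   => true,
      "max_items_per_response" => 500
    }

    legacy_application_config.each do |k, v|
      k = config_key_map[k] || k
      # "foo.bar" => baz  -->  cfg["foo"]["bar"] = baz
      ks  = k.split('.')
      key = ks.pop
      cfg = arvados_config
      ks.each do |kk|
        cfg = cfg[kk]
        break if cfg.nil?
      end
      cfg[key] = v unless cfg.nil?
    end

    puts arvados_config.to_yaml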

commit e6386e372bc0dcae1ed5c75ab322280eb379e2be
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date:   Tue Mar 19 16:40:19 2019 -0400

    13996: Config migration WIP
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>

diff --git a/services/api/config/config.defaults.yml b/services/api/config/config.defaults.yml
new file mode 120000
index 000000000..3a43d4bcd
--- /dev/null
+++ b/services/api/config/config.defaults.yml
@@ -0,0 +1 @@
+../../../lib/config/config.defaults.yml
\ No newline at end of file
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 16059cad7..fc091e375 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -26,8 +26,88 @@ EOS
   WARNED_OMNIAUTH_CONFIG = true
 end
 
-$application_config = {}
+$arvados_config = {}
 
+["#{::Rails.root.to_s}/config/config.defaults.yml", "/etc/arvados/config.yml"].each do |path|
+  if File.exist? path
+    confs = YAML.load(IO.read(path), deserialize_symbols: false)
+    if confs
+      clusters = confs["Clusters"].first
+      $arvados_config["ClusterID"] = clusters[0]
+      $arvados_config.merge!(clusters[1])
+    end
+  end
+end
+
+config_key_map =
+  {
+    "git_repositories_dir":             "Git.Repositories",
+   "disable_api_methods":              "API.DisabledAPIs",
+   "max_request_size":                 "API.MaxRequestSize",
+   "max_index_database_read":          "API.MaxIndexDatabaseRead",
+   "max_items_per_response":           "API.MaxItemsPerResponse",
+   "auto_setup_new_users":                      "Users.AutoSetupNewUsers",
+   "auto_setup_new_users_with_vm_uuid":         "Users.AutoSetupNewUsersWithVmUUID",
+   "auto_setup_new_users_with_repository":      "Users.AutoSetupNewUsersWithRepository",
+   "auto_setup_name_blacklist":                 "Users.AutoSetupUsernameBlacklist",
+   "new_users_are_active":                      "Users.NewUsersAreActive",
+   "auto_admin_user":                           "Users.AutoAdminUserWithEmail",
+   "auto_admin_first_user":                     "Users.AutoAdminFirstUser",
+   "user_profile_notification_address":         "Users.UserProfileNotificationAddress",
+   "admin_notifier_email_from":                 "Users.AdminNotifierEmailFrom",
+   "email_subject_prefix":                      "Users.EmailSubjectPrefix",
+   "user_notifier_email_from":                  "Users.UserNotifierEmailFrom",
+   "new_user_notification_recipients":          "Users.NewUserNotificationRecipients",
+   "new_inactive_user_notification_recipients": "Users.NewInactiveUserNotificationRecipients",
+   "sso_app_secret":                            "Login.ProviderAppSecret",
+   "sso_app_id":                                "Login.ProviderAppID",
+   "max_audit_log_age":                         "AuditLogs.MaxAge",
+   "max_audit_log_delete_batch":                "AuditLogs.MaxDeleteBatch",
+   "unlogged_attributes":                       "AuditLogs.UnloggedAttributes",
+   "max_request_log_params_size":               "SystemLogs.MaxRequestLogParamsSize",
+   "default_collection_replication":            "Collections.DefaultReplication",
+   "default_trash_lifetime":                    "Collections.DefaultTrashLifetime",
+   "collection_versioning":                     "Collections.CollectionVersioning",
+   "preserve_version_if_idle":                  "Collections.PreserveVersionIfIdle",
+   "trash_sweep_interval":                      "Collections.TrashSweepInterval",
+   "blob_signing_key":                          "Collections.BlobSigningKey",
+   "blob_signature_ttl":                        "Collections.BlobSigningTTL",
+   "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning",
+   "docker_image_formats":             "Containers.SupportedDockerImageFormats",
+   "log_reuse_decisions":              "Containers.LogReuseDecisions",
+   "container_default_keep_cache_ram": "Containers.DefaultKeepCacheRAM",
+   "max_container_dispatch_attempts":  "Containers.MaxDispatchAttempts",
+   "container_count_max":              "Containers.MaxRetryAttempts",
+   "preemptible_instances":            "Containers.UsePreemptibleInstances",
+   "max_compute_nodes":                "Containers.MaxComputeVMs",
+   "crunch_log_bytes_per_event":       "Containers.Logging.LogBytesPerEvent",
+   "crunch_log_seconds_between_events": "Containers.Logging.LogSecondsBetweenEvents",
+   "crunch_log_throttle_period":        "Containers.Logging.LogThrottlePeriod",
+   "crunch_log_throttle_bytes":         "Containers.Logging.LogThrottleBytes",
+   "crunch_log_throttle_lines":         "Containers.Logging.LogThrottleLines",
+   "crunch_limit_log_bytes_per_job":    "Containers.Logging.LimitLogBytesPerJob",
+   "crunch_log_partial_line_throttle_period": "Containers.Logging.LogPartialLineThrottlePeriod",
+   "crunch_log_update_period":                "Containers.Logging.LogUpdatePeriod",
+   "crunch_log_update_size":                  "Containers.Logging.LogUpdateSize",
+   "clean_container_log_rows_after":          "Containers.Logging.MaxAge",
+   "dns_server_conf_dir":                     "Containers.SLURM.Managed.DNSServerConfDir",
+   "dns_server_conf_template":                "Containers.SLURM.Managed.DNSServerConfTemplate",
+   "dns_server_reload_command":               "Containers.SLURM.Managed.DNSServerReloadCommand",
+   "dns_server_update_command":               "Containers.SLURM.Managed.DNSServerUpdateCommand",
+   "compute_node_domain":                     "Containers.SLURM.Managed.ComputeNodeDomain",
+   "compute_node_nameservers":                "Containers.SLURM.Managed.ComputeNodeNameservers",
+   "assign_node_hostname":                    "Containers.SLURM.Managed.AssignNodeHostname",
+   "crunch_job_wrapper":                      "Containers.JobsAPI.CrunchJobWrapper",
+   "crunch_job_user":                         "Containers.JobsAPI.CrunchJobUser",
+   "crunch_refresh_trigger":                  "Containers.JobsAPI.CrunchRefreshTrigger",
+   "git_internal_dir":                        "Containers.JobsAPI.GitInternalDir",
+   "reuse_job_if_outputs_differ":             "Containers.JobsAPI.ReuseJobIfOutputsDiffer",
+   "default_docker_image_for_jobs":           "Containers.JobsAPI.DefaultDockerImage",
+   "mailchimp_api_key":                       "Mail.MailchimpAPIKey",
+   "mailchimp_list_id":                       "Mail.MailchimpListID",
+}
+
+application_config = {}
 %w(application.default application).each do |cfgfile|
   path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
   if File.exist? path
@@ -35,26 +115,41 @@ $application_config = {}
     confs = YAML.load(yaml, deserialize_symbols: true)
     # Ignore empty YAML file:
     next if confs == false
-    $application_config.merge!(confs['common'] || {})
-    $application_config.merge!(confs[::Rails.env.to_s] || {})
+    application_config.merge!(confs['common'] || {})
+    application_config.merge!(confs[::Rails.env.to_s] || {})
+  end
+end
+
+application_config.each do |k, v|
+  cfg = $arvados_config
+
+  if config_key_map[k.to_sym]
+     k = config_key_map[k.to_sym]
+  end
+
+  # "foo.bar: baz" --> { config.foo.bar = baz }
+  ks = k.split '.'
+  k = ks.pop
+  ks.each do |kk|
+    cfg = cfg[kk]
+    if cfg.nil?
+      break
+    end
+  end
+  if !cfg.nil?
+    cfg[k] = v
   end
 end
 
 Server::Application.configure do
   nils = []
-  $application_config.each do |k, v|
-    # "foo.bar: baz" --> { config.foo.bar = baz }
+  $arvados_config.each do |k, v|
     cfg = config
-    ks = k.split '.'
-    k = ks.pop
-    ks.each do |kk|
-      cfg = cfg.send(kk)
-    end
     if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
-      # Config must have been set already in environments/*.rb.
-      #
-      # After config files have been migrated, this mechanism should
-      # be deprecated, then removed.
+    # Config must have been set already in environments/*.rb.
+    #
+    # After config files have been migrated, this mechanism should
+    # be deprecated, then removed.
     elsif v.nil?
       # Config variables are not allowed to be nil. Make a "naughty"
       # list, and present it below.
diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake
index ed349600b..4532225a3 100644
--- a/services/api/lib/tasks/config_dump.rake
+++ b/services/api/lib/tasks/config_dump.rake
@@ -5,6 +5,6 @@
 namespace :config do
   desc 'Show site configuration'
   task dump: :environment do
-    puts $application_config.to_yaml
+    puts $arvados_config.to_yaml
   end
 end
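
A standalone sketch of the cluster-config loading added in the hunk above:
read the defaults file and then the site file, take the single entry under
"Clusters", record its key as ClusterID, and shallow-merge its sections
into $arvados_config (which the updated rake config:dump task prints). The
paths and YAML.safe_load call are simplifications of the initializer, which
uses a Rails.root-relative path and YAML.load with deserialize_symbols: false.

    require 'yaml'

    arvados_config = {}
    ["lib/config/config.defaults.yml", "/etc/arvados/config.yml"].each do |path|
      next unless File.exist?(path)
      confs = YAML.safe_load(IO.read(path))
      next unless confs
      cluster_id, cluster_config = confs["Clusters"].first
      arvados_config["ClusterID"] = cluster_id
      arvados_config.merge!(cluster_config)
    end

    puts arvados_config.to_yaml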

-----------------------------------------------------------------------


hooks/post-receive
-- 



