[ARVADOS] updated: 1.3.0-549-g3de7cd8af

Git user git at public.curoverse.com
Thu Mar 21 20:40:35 UTC 2019


Summary of changes:
 lib/config/config.defaults.yml                     |  88 +++-
 .../api/app/controllers/application_controller.rb  |  15 +-
 services/api/app/controllers/static_controller.rb  |   4 +-
 .../app/controllers/user_sessions_controller.rb    |   2 +-
 .../api/app/models/api_client_authorization.rb     |  10 +-
 services/api/app/models/collection.rb              |  24 +-
 services/api/app/models/commit_ancestor.rb         |   2 +-
 services/api/app/models/container.rb               |   4 +-
 services/api/app/models/container_request.rb       |   6 +-
 services/api/app/models/node.rb                    |  41 +-
 .../api/app/views/admin_notifier/new_user.text.erb |   3 +-
 services/api/config/application.default.yml        | 480 ---------------------
 services/api/config/initializers/load_config.rb    | 213 +++++----
 services/api/config/initializers/lograge.rb        |   4 +-
 services/api/config/initializers/omniauth_init.rb  |  12 +-
 services/api/lib/audit_logs.rb                     |   4 +-
 services/api/lib/crunch_dispatch.rb                |  34 +-
 services/api/lib/enable_jobs_api.rb                |   6 +-
 services/api/lib/josh_id.rb                        |   2 +-
 services/api/lib/load_param.rb                     |   2 +-
 services/api/lib/log_reuse_info.rb                 |   2 +-
 services/api/lib/refresh_permission_view.rb        |   4 +-
 services/api/lib/sweep_trashed_objects.rb          |   4 +-
 services/api/lib/tasks/config_check.rake           |   4 +-
 .../api/lib/tasks/delete_old_container_logs.rake   |   2 +-
 services/api/lib/tasks/delete_old_job_logs.rake    |   2 +-
 services/api/lib/trashable.rb                      |   6 +-
 27 files changed, 303 insertions(+), 677 deletions(-)

       via  3de7cd8af056b8369151e17483578fd646827022 (commit)
       via  a6dab7c8e0c9eeb14a1c66f54a668dbb8c577d03 (commit)
      from  f35937526207a79013583afea084f8e2bff11502 (commit)

Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.


commit 3de7cd8af056b8369151e17483578fd646827022
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date:   Thu Mar 21 16:39:32 2019 -0400

    13996: Migrate majority of defaults to config.defaults.yml
    
    API server knows types of config parameters (needed for type coercion,
    also useful for type checking).
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>

diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index 53fc5d9cb..70162ee5f 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -1,5 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
 #
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
 #
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.defaults.yml
+
 Clusters:
   xxxxx:
     SystemRootToken: ""
@@ -8,6 +18,51 @@ Clusters:
     # Server expects request header of the format "Authorization: Bearer xxx"
     ManagementToken: ""
 
+    Services:
+      RailsAPI:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Controller:
+        InternalURLs: {}
+        ExternalURL: ""
+      Websocket:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepbalance:
+        InternalURLs: {}
+      GitHTTP:
+        InternalURLs: {}
+        ExternalURL: ""
+      GitSSH:
+        ExternalURL: ""
+      DispatchCloud:
+        InternalURLs: {}
+      SSO:
+        ExternalURL: ""
+      Keepproxy:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAV:
+        InternalURLs: {}
+        ExternalURL: ""
+      WebDAVDownload:
+        InternalURLs: {}
+        ExternalURL: ""
+      Keepstore:
+        InternalURLs: {}
+      Composer:
+        ExternalURL: ""
+      WebShell:
+        ExternalURL: ""
+      Workbench1:
+        InternalURLs: {}
+        ExternalURL: ""
+      Workbench2:
+        ExternalURL: ""
     API:
       # Maximum size (in bytes) allowed for a single API request.  This
       # limit is published in the discovery document for use by clients.
@@ -38,6 +93,11 @@ Clusters:
       # Example: ["jobs.create", "pipeline_instances.create"]
       DisabledAPIs: []
 
+      # Interval (seconds) between asynchronous permission view updates. Any
+      # permission-updating API called with the 'async' parameter schedules an
+      # update on the permission view in the future, if not already scheduled.
+      AsyncPermissionsUpdateInterval: 20
+
     Users:
       # Config parameters to automatically setup new users.  If enabled,
       # this users will be able to self-activate.  Enable this if you want
@@ -165,11 +225,6 @@ Clusters:
       # arrived, and deleted if their delete_at time has arrived.
       TrashSweepInterval: 60
 
-      # Interval (seconds) between asynchronous permission view updates. Any
-      # permission-updating API called with the 'async' parameter schedules a an
-      # update on the permission view in the future, if not already scheduled.
-      AsyncPermissionsUpdateInterval: 20
-
       # If true, enable collection versioning.
       # When a collection's preserve_version field is true or the current version
       # is older than the amount of seconds defined on preserve_version_if_idle,
@@ -195,6 +250,9 @@ Clusters:
       # {git_repositories_dir}/arvados/.git
       Repositories: /var/lib/arvados/git/repositories
 
+    TLS:
+      Insecure: false
+
     Containers:
       # List of supported Docker Registry image formats that compute nodes
       # are able to use. `arv keep docker` will error out if a user tries
@@ -327,7 +385,7 @@ Clusters:
           # Example for compute0000, compute0001, ....:
           # assign_node_hostname: compute%<slot_number>04d
           # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
-          AssignNodeHostname: compute%<slot_number>d
+          AssignNodeHostname: "compute%<slot_number>d"
 
       JobsAPI:
         # Enable the legacy Jobs API.
@@ -346,8 +404,8 @@ Clusters:
         # Docker image to be used when none found in runtime_constraints of a job
         DefaultDockerImage: ""
 
-        # :none or :slurm_immediate
-        CrunchJobWrapper: :none
+        # none or slurm_immediate
+        CrunchJobWrapper: none
 
         # username, or false = do not set uid when running jobs.
         CrunchJobUser: crunch
@@ -368,10 +426,10 @@ Clusters:
         ReuseJobIfOutputsDiffer: false
 
       Mail:
-        MailchimpAPIKey:            # api-server/mailchimp_api_key
-        MailchimpListID:            # api-server/mailchimp_list_id
-        SendUserSetupNotificationEmail:  # workbench/send_user_setup_notification_email
-        IssueReporterEmailFrom:     # workbench/issue_reporter_email_from
-        IssueReporterEmailTo:       # workbench/issue_reporter_email_to
-        SupportEmailAddress:        # workbench/support_email_address
-        EmailFrom:                  # workbench/email_from
+        MailchimpAPIKey: ""
+        MailchimpListID: ""
+        SendUserSetupNotificationEmail: ""
+        IssueReporterEmailFrom: ""
+        IssueReporterEmailTo: ""
+        SupportEmailAddress: ""
+        EmailFrom: ""
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index 7d6e697ca..e133edfa9 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -53,8 +53,6 @@ class ApplicationController < ActionController::Base
   before_filter(:render_404_if_no_object,
                 except: [:index, :create] + ERROR_ACTIONS)
 
-  theme Rails.configuration.arvados_theme
-
   attr_writer :resource_attrs
 
   begin
@@ -83,15 +81,10 @@ class ApplicationController < ActionController::Base
 
   def default_url_options
     options = {}
-    if Rails.configuration.host
-      options[:host] = Rails.configuration.host
-    end
-    if Rails.configuration.port
-      options[:port] = Rails.configuration.port
-    end
-    if Rails.configuration.protocol
-      options[:protocol] = Rails.configuration.protocol
-    end
+    exturl = URI.parse(Rails.configuration.Services["Controller"]["ExternalURL"])
+    options[:host] = exturl.host
+    options[:port] = exturl.port
+    options[:protocol] = exturl.scheme
     options
   end
 
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
index f0992c183..f0f7a34b5 100644
--- a/services/api/app/controllers/static_controller.rb
+++ b/services/api/app/controllers/static_controller.rb
@@ -12,8 +12,8 @@ class StaticController < ApplicationController
   def home
     respond_to do |f|
       f.html do
-        if Rails.configuration.workbench_address
-          redirect_to Rails.configuration.workbench_address
+        if Rails.configuration.Services["Workbench1"]["ExternalURL"]
+          redirect_to Rails.configuration.Services["Workbench1"]["ExternalURL"]
         else
           render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
         end
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
index c03f5b692..6eb03c899 100644
--- a/services/api/app/controllers/user_sessions_controller.rb
+++ b/services/api/app/controllers/user_sessions_controller.rb
@@ -120,7 +120,7 @@ class UserSessionsController < ApplicationController
 
     flash[:notice] = 'You have logged off'
     return_to = params[:return_to] || root_url
-    redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+    redirect_to "#{Rails.configuration.Services["SSO"]["ExternalURL"]}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
   end
 
   # login - Just bounce to /auth/joshid. The only purpose of this function is
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index 152ecfbc7..d9aca4c76 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -199,7 +199,7 @@ class Node < ArvadosModel
       tmpfile = nil
       begin
         begin
-          template = IO.read(Rails.configuration.Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"])
+          template = IO.read(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"])
         rescue IOError, SystemCallError => e
           logger.error "Reading #{Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]}: #{e.message}"
           raise
diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
index 8f0dbf4e4..40210cbb3 100644
--- a/services/api/config/application.default.yml
+++ b/services/api/config/application.default.yml
@@ -13,216 +13,6 @@
 # 5. Section in application.default.yml called "common"
 
 common:
-  ###
-  ### Essential site configuration
-  ###
-
-  # The prefix used for all database identifiers to identify the record as
-  # originating from this site.  Must be exactly 5 alphanumeric characters
-  # (lowercase ASCII letters and digits).
-  uuid_prefix: ~
-
-  # secret_token is a string of alphanumeric characters used by Rails
-  # to sign session tokens. IMPORTANT: This is a site secret. It
-  # should be at least 50 characters.
-  secret_token: ~
-
-  # blob_signing_key is a string of alphanumeric characters used to
-  # generate permission signatures for Keep locators. It must be
-  # identical to the permission key given to Keep. IMPORTANT: This is
-  # a site secret. It should be at least 50 characters.
-  #
-  # Modifying blob_signing_key will invalidate all existing
-  # signatures, which can cause programs to fail (e.g., arv-put,
-  # arv-get, and Crunch jobs).  To avoid errors, rotate keys only when
-  # no such processes are running.
-  blob_signing_key: ~
-
-  # These settings are provided by your OAuth2 provider (e.g.,
-  # sso-provider).
-  sso_app_secret: ~
-  sso_app_id: ~
-  sso_provider_url: ~
-
-  # If this is not false, HTML requests at the API server's root URL
-  # are redirected to this location, and it is provided in the text of
-  # user activation notification email messages to remind them where
-  # to log in.
-  workbench_address: false
-
-  # Client-facing URI for websocket service. Nginx should be
-  # configured to proxy this URI to arvados-ws; see
-  # http://doc.arvados.org/install/install-ws.html
-  #
-  # If websocket_address is false (which is the default), no websocket
-  # server will be advertised to clients. This configuration is not
-  # supported.
-  #
-  # Example:
-  #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
-  websocket_address: false
-
-  # Maximum number of websocket connections allowed
-  websocket_max_connections: 500
-
-  # Maximum number of events a single connection can be backlogged
-  websocket_max_notify_backlog: 1000
-
-  # Maximum number of subscriptions a single websocket connection can have
-  # active.
-  websocket_max_filters: 10
-
-  # Git repositories must be readable by api server, or you won't be
-  # able to submit crunch jobs. To pass the test suites, put a clone
-  # of the arvados tree in {git_repositories_dir}/arvados.git or
-  # {git_repositories_dir}/arvados/.git
-  git_repositories_dir: /var/lib/arvados/git/repositories
-
-  # This is a (bare) repository that stores commits used in jobs.  When a job
-  # runs, the source commits are first fetched into this repository, then this
-  # repository is used to deploy to compute nodes.  This should NOT be a
-  # subdirectory of {git_repositiories_dir}.
-  git_internal_dir: /var/lib/arvados/internal.git
-
-  # Default replication level for collections. This is used when a
-  # collection's replication_desired attribute is nil.
-  default_collection_replication: 2
-
-
-  ###
-  ### Overriding default advertised hostnames/URLs
-  ###
-
-  # If not false, this is the hostname, port, and protocol that will be used
-  # for root_url and advertised in the discovery document.  By default, use
-  # the default Rails logic for deciding on a hostname.
-  host: false
-  port: false
-  protocol: false
-
-  # Base part of SSH git clone url given with repository resources. If
-  # true, the default "git at git.(uuid_prefix).arvadosapi.com:" is
-  # used. If false, SSH clone URLs are not advertised. Include a
-  # trailing ":" or "/" if needed: it will not be added automatically.
-  git_repo_ssh_base: true
-
-  # Base part of HTTPS git clone urls given with repository
-  # resources. This is expected to be an arv-git-httpd service which
-  # accepts API tokens as HTTP-auth passwords. If true, the default
-  # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
-  # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
-  # if needed: it will not be added automatically.
-  git_repo_https_base: true
-
-
-  ###
-  ### New user and & email settings
-  ###
-
-  # Config parameters to automatically setup new users.  If enabled,
-  # this users will be able to self-activate.  Enable this if you want
-  # to run an open instance where anyone can create an account and use
-  # the system without requiring manual approval.
-  #
-  # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
-  # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
-  auto_setup_new_users: false
-  auto_setup_new_users_with_vm_uuid: false
-  auto_setup_new_users_with_repository: false
-  auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
-
-  # When new_users_are_active is set to true, new users will be active
-  # immediately.  This skips the "self-activate" step which enforces
-  # user agreements.  Should only be enabled for development.
-  new_users_are_active: false
-
-  # The e-mail address of the user you would like to become marked as an admin
-  # user on their first login.
-  # In the default configuration, authentication happens through the Arvados SSO
-  # server, which uses OAuth2 against Google's servers, so in that case this
-  # should be an address associated with a Google account.
-  auto_admin_user: false
-
-  # If auto_admin_first_user is set to true, the first user to log in when no
-  # other admin users exist will automatically become an admin user.
-  auto_admin_first_user: false
-
-  # Email address to notify whenever a user creates a profile for the
-  # first time
-  user_profile_notification_address: false
-
-  admin_notifier_email_from: arvados at example.com
-  email_subject_prefix: "[ARVADOS] "
-  user_notifier_email_from: arvados at example.com
-  new_user_notification_recipients: [ ]
-  new_inactive_user_notification_recipients: [ ]
-
-
-  ###
-  ### Limits, timeouts and durations
-  ###
-
-  # Lifetime (in seconds) of blob permission signatures generated by
-  # the API server. This determines how long a client can take (after
-  # retrieving a collection record) to retrieve the collection data
-  # from Keep. If the client needs more time than that (assuming the
-  # collection still has the same content and the relevant user/token
-  # still has permission) the client can retrieve the collection again
-  # to get fresh signatures.
-  #
-  # This must be exactly equal to the -blob-signature-ttl flag used by
-  # keepstore servers.  Otherwise, reading data blocks and saving
-  # collections will fail with HTTP 403 permission errors.
-  #
-  # Modifying blob_signature_ttl invalidates existing signatures; see
-  # blob_signing_key note above.
-  #
-  # The default is 2 weeks.
-  blob_signature_ttl: 1209600
-
-  # Default lifetime for ephemeral collections: 2 weeks. This must not
-  # be less than blob_signature_ttl.
-  default_trash_lifetime: 1209600
-
-  # Interval (seconds) between trash sweeps. During a trash sweep,
-  # collections are marked as trash if their trash_at time has
-  # arrived, and deleted if their delete_at time has arrived.
-  trash_sweep_interval: 60
-
-  # Interval (seconds) between asynchronous permission view updates. Any
-  # permission-updating API called with the 'async' parameter schedules a an
-  # update on the permission view in the future, if not already scheduled.
-  async_permissions_update_interval: 20
-
-  # Maximum characters of (JSON-encoded) query parameters to include
-  # in each request log entry. When params exceed this size, they will
-  # be JSON-encoded, truncated to this size, and logged as
-  # params_truncated.
-  max_request_log_params_size: 2000
-
-  # Maximum size (in bytes) allowed for a single API request.  This
-  # limit is published in the discovery document for use by clients.
-  # Note: You must separately configure the upstream web server or
-  # proxy to actually enforce the desired maximum request size on the
-  # server side.
-  max_request_size: 134217728
-
-  # Limit the number of bytes read from the database during an index
-  # request (by retrieving and returning fewer rows than would
-  # normally be returned in a single response).
-  # Note 1: This setting never reduces the number of returned rows to
-  # zero, no matter how big the first data row is.
-  # Note 2: Currently, this is only checked against a specific set of
-  # columns that tend to get large (collections.manifest_text,
-  # containers.mounts, workflows.definition). Other fields (e.g.,
-  # "properties" hashes) are not counted against this limit.
-  max_index_database_read: 134217728
-
-  # Maximum number of items to return when responding to a APIs that
-  # can return partial result sets using limit and offset parameters
-  # (e.g., *.index, groups.contents). If a request specifies a "limit"
-  # parameter higher than this value, this value is used instead.
-  max_items_per_response: 1000
 
   # When you run the db:delete_old_job_logs task, it will find jobs that
   # have been finished for at least this many seconds, and delete their
@@ -235,229 +25,6 @@ common:
   # crunchstat logs from the logs table.
   clean_container_log_rows_after: <%= 30.days %>
 
-  # Time to keep audit logs, in seconds. (An audit log is a row added
-  # to the "logs" table in the PostgreSQL database each time an
-  # Arvados object is created, modified, or deleted.)
-  #
-  # Currently, websocket event notifications rely on audit logs, so
-  # this should not be set lower than 600 (5 minutes).
-  max_audit_log_age: 1209600
-
-  # Maximum number of log rows to delete in a single SQL transaction.
-  #
-  # If max_audit_log_delete_batch is 0, log entries will never be
-  # deleted by Arvados. Cleanup can be done by an external process
-  # without affecting any Arvados system processes, as long as very
-  # recent (<5 minutes old) logs are not deleted.
-  #
-  # 100000 is a reasonable batch size for most sites.
-  max_audit_log_delete_batch: 0
-
-  # The maximum number of compute nodes that can be in use simultaneously
-  # If this limit is reduced, any existing nodes with slot number >= new limit
-  # will not be counted against the new limit. In other words, the new limit
-  # won't be strictly enforced until those nodes with higher slot numbers
-  # go down.
-  max_compute_nodes: 64
-
-  # These two settings control how frequently log events are flushed to the
-  # database.  Log lines are buffered until either crunch_log_bytes_per_event
-  # has been reached or crunch_log_seconds_between_events has elapsed since
-  # the last flush.
-  crunch_log_bytes_per_event: 4096
-  crunch_log_seconds_between_events: 1
-
-  # The sample period for throttling logs, in seconds.
-  crunch_log_throttle_period: 60
-
-  # Maximum number of bytes that job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_bytes: 65536
-
-  # Maximum number of lines that job can log over crunch_log_throttle_period
-  # before being silenced until the end of the period.
-  crunch_log_throttle_lines: 1024
-
-  # Maximum bytes that may be logged by a single job.  Log bytes that are
-  # silenced by throttling are not counted against this total.
-  crunch_limit_log_bytes_per_job: 67108864
-
-  crunch_log_partial_line_throttle_period: 5
-
-  # Container logs are written to Keep and saved in a collection,
-  # which is updated periodically while the container runs.  This
-  # value sets the interval (given in seconds) between collection
-  # updates.
-  crunch_log_update_period: 1800
-
-  # The log collection is also updated when the specified amount of
-  # log data (given in bytes) is produced in less than one update
-  # period.
-  crunch_log_update_size: 33554432
-
-  # Attributes to suppress in events and audit logs.  Notably,
-  # specifying ["manifest_text"] here typically makes the database
-  # smaller and faster.
-  #
-  # Warning: Using any non-empty value here can have undesirable side
-  # effects for any client or component that relies on event logs.
-  # Use at your own risk.
-  unlogged_attributes: []
-
-  # API methods to disable. Disabled methods are not listed in the
-  # discovery document, and respond 404 to all requests.
-  # Example: ["jobs.create", "pipeline_instances.create"]
-  disable_api_methods: []
-
-  # Enable the legacy Jobs API.
-  # auto -- (default) enable the Jobs API only if it has been used before
-  #         (i.e., there are job records in the database)
-  # true -- enable the Jobs API despite lack of existing records.
-  # false -- disable the Jobs API despite presence of existing records.
-  enable_legacy_jobs_api: auto
-
-  ###
-  ### Crunch, DNS & compute node management
-  ###
-
-  # Preemptible instance support (e.g. AWS Spot Instances)
-  # When true, child containers will get created with the preemptible
-  # scheduling parameter parameter set.
-  preemptible_instances: false
-
-  # Docker image to be used when none found in runtime_constraints of a job
-  default_docker_image_for_jobs: false
-
-  # List of supported Docker Registry image formats that compute nodes
-  # are able to use. `arv keep docker` will error out if a user tries
-  # to store an image with an unsupported format. Use an empty array
-  # to skip the compatibility check (and display a warning message to
-  # that effect).
-  #
-  # Example for sites running docker < 1.10: ["v1"]
-  # Example for sites running docker >= 1.10: ["v2"]
-  # Example for disabling check: []
-  docker_image_formats: ["v2"]
-
-  # :none or :slurm_immediate
-  crunch_job_wrapper: :none
-
-  # username, or false = do not set uid when running jobs.
-  crunch_job_user: crunch
-
-  # The web service must be able to create/write this file, and
-  # crunch-job must be able to stat() it.
-  crunch_refresh_trigger: /tmp/crunch_refresh_trigger
-
-  # Path to dns server configuration directory
-  # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
-  # files or touch restart.txt (see below).
-  dns_server_conf_dir: false
-
-  # Template file for the dns server host snippets. See
-  # unbound.template in this directory for an example. If false, do
-  # not write any config files.
-  dns_server_conf_template: false
-
-  # String to write to {dns_server_conf_dir}/restart.txt (with a
-  # trailing newline) after updating local data. If false, do not
-  # open or write the restart.txt file.
-  dns_server_reload_command: false
-
-  # Command to run after each DNS update. Template variables will be
-  # substituted; see the "unbound" example below. If false, do not run
-  # a command.
-  dns_server_update_command: false
-
-  ## Example for unbound:
-  #dns_server_conf_dir: /etc/unbound/conf.d
-  #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
-  ## ...plus one of the following two methods of reloading:
-  #dns_server_reload_command: unbound-control reload
-  #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
-
-  compute_node_domain: false
-  compute_node_nameservers:
-    - 192.168.1.1
-
-  # Hostname to assign to a compute node when it sends a "ping" and the
-  # hostname in its Node record is nil.
-  # During bootstrapping, the "ping" script is expected to notice the
-  # hostname given in the ping response, and update its unix hostname
-  # accordingly.
-  # If false, leave the hostname alone (this is appropriate if your compute
-  # nodes' hostnames are already assigned by some other mechanism).
-  #
-  # One way or another, the hostnames of your node records should agree
-  # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
-  #
-  # Example for compute0000, compute0001, ....:
-  # assign_node_hostname: compute%<slot_number>04d
-  # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
-  assign_node_hostname: compute%<slot_number>d
-
-
-  ###
-  ### Job and container reuse logic.
-  ###
-
-  # Include details about job reuse decisions in the server log. This
-  # causes additional database queries to run, so it should not be
-  # enabled unless you expect to examine the resulting logs for
-  # troubleshooting purposes.
-  log_reuse_decisions: false
-
-  # Control job reuse behavior when two completed jobs match the
-  # search criteria and have different outputs.
-  #
-  # If true, in case of a conflict, reuse the earliest job (this is
-  # similar to container reuse behavior).
-  #
-  # If false, in case of a conflict, do not reuse any completed job,
-  # but do reuse an already-running job if available (this is the
-  # original job reuse behavior, and is still the default).
-  reuse_job_if_outputs_differ: false
-
-  ###
-  ### Federation support.
-  ###
-
-  # You can enable use of this cluster by users who are authenticated
-  # by a remote Arvados site. Control which remote hosts are trusted
-  # to authenticate which user IDs by configuring remote_hosts,
-  # remote_hosts_via_dns, or both. The default configuration disables
-  # remote authentication.
-
-  # Map known prefixes to hosts. For example, if user IDs beginning
-  # with "zzzzz-" should be authenticated by the Arvados server at
-  # "zzzzz.example.com", use:
-  #
-  # remote_hosts:
-  #   zzzzz: zzzzz.example.com
-  remote_hosts: {}
-
-  # Use {prefix}.arvadosapi.com for any prefix not given in
-  # remote_hosts above.
-  remote_hosts_via_dns: false
-
-  # List of cluster prefixes.  These are "trusted" clusters, users
-  # from the clusters listed here will be automatically setup and
-  # activated.  This is separate from the settings
-  # auto_setup_new_users and new_users_are_active.
-  auto_activate_users_from: []
-
-  ###
-  ### Remaining assorted configuration options.
-  ###
-
-  arvados_theme: default
-
-  # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the
-  # Single Sign On (sso) server and remote Arvados sites.  Should only
-  # be enabled during development when the SSO server is using a
-  # self-signed cert.
-  sso_insecure: false
-
   ## Set Time.zone default to the specified zone and make Active
   ## Record auto-convert to this zone.  Run "rake -D time" for a list
   ## of tasks for finding time zone names. Default is UTC.
@@ -472,17 +39,6 @@ common:
   # Version of your assets, change this if you want to expire all your assets
   assets.version: "1.0"
 
-  # Allow clients to create collections by providing a manifest with
-  # unsigned data blob locators. IMPORTANT: This effectively disables
-  # access controls for data stored in Keep: a client who knows a hash
-  # can write a manifest that references the hash, pass it to
-  # collections.create (which will create a permission link), use
-  # collections.get to obtain a signature for that data locator, and
-  # use that signed locator to retrieve the data from Keep. Therefore,
-  # do not turn this on if your users expect to keep data private from
-  # one another!
-  permit_create_collection_with_unsigned_manifest: false
-
   default_openid_prefix: https://www.google.com/accounts/o8/id
 
   # Override the automatic version string. With the default value of
@@ -496,42 +52,6 @@ common:
   # (included in vendor packages).
   package_version: false
 
-  # Default value for container_count_max for container requests.  This is the
-  # number of times Arvados will create a new container to satisfy a container
-  # request.  If a container is cancelled it will retry a new container if
-  # container_count < container_count_max on any container requests associated
-  # with the cancelled container.
-  container_count_max: 3
-
-  # Default value for keep_cache_ram of a container's runtime_constraints.
-  container_default_keep_cache_ram: 268435456
-
-  # Token to be included in all healthcheck requests. Disabled by default.
-  # Server expects request header of the format "Authorization: Bearer xxx"
-  ManagementToken: false
-
-  # URL of keep-web service.  Provides read/write access to collections via
-  # HTTP and WebDAV protocols.
-  #
-  # Example:
-  # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/
-  keep_web_service_url: false
-
-  # If true, enable collection versioning.
-  # When a collection's preserve_version field is true or the current version
-  # is older than the amount of seconds defined on preserve_version_if_idle,
-  # a snapshot of the collection's previous state is created and linked to
-  # the current collection.
-  collection_versioning: false
-  #   0 = auto-create a new version on every update.
-  #  -1 = never auto-create new versions.
-  # > 0 = auto-create a new version when older than the specified number of seconds.
-  preserve_version_if_idle: -1
-
-  # Number of times a container can be unlocked before being
-  # automatically cancelled.
-  max_container_dispatch_attempts: 5
-
 development:
   force_ssl: false
   cache_classes: false
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index f52e50089..0a99b1afc 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -20,9 +20,9 @@ EOS
   # Real values will be copied from globals by omniauth_init.rb. For
   # now, assign some strings so the generic *.yml config loader
   # doesn't overwrite them or complain that they're missing.
-  Rails.configuration.sso_app_id = 'xxx'
-  Rails.configuration.sso_app_secret = 'xxx'
-  Rails.configuration.sso_provider_url = '//xxx'
+  Rails.configuration.Login["ProviderAppID"] = 'xxx'
+  Rails.configuration.Login["ProviderAppSecret"] = 'xxx'
+  Rails.configuration.Services["SSO"]["ExternalURL"] = '//xxx'
   WARNED_OMNIAUTH_CONFIG = true
 end
 
@@ -39,75 +39,110 @@ $arvados_config = {}
   end
 end
 
-config_key_map =
-  {
-    "git_repositories_dir":             "Git.Repositories",
-   "disable_api_methods":              "API.DisabledAPIs",
-   "max_request_size":                 "API.MaxRequestSize",
-   "max_index_database_read":          "API.MaxIndexDatabaseRead",
-   "max_items_per_response":           "API.MaxItemsPerResponse",
-   "async_permissions_update_interval":         "API.AsyncPermissionsUpdateInterval",
-   "auto_setup_new_users":                      "Users.AutoSetupNewUsers",
-   "auto_setup_new_users_with_vm_uuid":         "Users.AutoSetupNewUsersWithVmUUID",
-   "auto_setup_new_users_with_repository":      "Users.AutoSetupNewUsersWithRepository",
-   "auto_setup_name_blacklist":                 "Users.AutoSetupUsernameBlacklist",
-   "new_users_are_active":                      "Users.NewUsersAreActive",
-   "auto_admin_user":                           "Users.AutoAdminUserWithEmail",
-   "auto_admin_first_user":                     "Users.AutoAdminFirstUser",
-   "user_profile_notification_address":         "Users.UserProfileNotificationAddress",
-   "admin_notifier_email_from":                 "Users.AdminNotifierEmailFrom",
-   "email_subject_prefix":                      "Users.EmailSubjectPrefix",
-   "user_notifier_email_from":                  "Users.UserNotifierEmailFrom",
-   "new_user_notification_recipients":          "Users.NewUserNotificationRecipients",
-   "new_inactive_user_notification_recipients": "Users.NewInactiveUserNotificationRecipients",
-   "sso_app_secret":                            "Login.ProviderAppSecret",
-   "sso_app_id":                                "Login.ProviderAppID",
-   "max_audit_log_age":                         "AuditLogs.MaxAge",
-   "max_audit_log_delete_batch":                "AuditLogs.MaxDeleteBatch",
-   "unlogged_attributes":                       "AuditLogs.UnloggedAttributes",
-   "max_request_log_params_size":               "SystemLogs.MaxRequestLogParamsSize",
-   "default_collection_replication":            "Collections.DefaultReplication",
-   "default_trash_lifetime":                    "Collections.DefaultTrashLifetime",
-   "collection_versioning":                     "Collections.CollectionVersioning",
-   "preserve_version_if_idle":                  "Collections.PreserveVersionIfIdle",
-   "trash_sweep_interval":                      "Collections.TrashSweepInterval",
-   "blob_signing_key":                          "Collections.BlobSigningKey",
-   "blob_signature_ttl":                        "Collections.BlobSigningTTL",
-   "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning", # XXX
-   "docker_image_formats":             "Containers.SupportedDockerImageFormats",
-   "log_reuse_decisions":              "Containers.LogReuseDecisions",
-   "container_default_keep_cache_ram": "Containers.DefaultKeepCacheRAM",
-   "max_container_dispatch_attempts":  "Containers.MaxDispatchAttempts",
-   "container_count_max":              "Containers.MaxRetryAttempts",
-   "preemptible_instances":            "Containers.UsePreemptibleInstances",
-   "max_compute_nodes":                "Containers.MaxComputeVMs",
-   "crunch_log_bytes_per_event":       "Containers.Logging.LogBytesPerEvent",
-   "crunch_log_seconds_between_events": "Containers.Logging.LogSecondsBetweenEvents",
-   "crunch_log_throttle_period":        "Containers.Logging.LogThrottlePeriod",
-   "crunch_log_throttle_bytes":         "Containers.Logging.LogThrottleBytes",
-   "crunch_log_throttle_lines":         "Containers.Logging.LogThrottleLines",
-   "crunch_limit_log_bytes_per_job":    "Containers.Logging.LimitLogBytesPerJob",
-   "crunch_log_partial_line_throttle_period": "Containers.Logging.LogPartialLineThrottlePeriod",
-   "crunch_log_update_period":                "Containers.Logging.LogUpdatePeriod",
-   "crunch_log_update_size":                  "Containers.Logging.LogUpdateSize",
-   "clean_container_log_rows_after":          "Containers.Logging.MaxAge",
-   "dns_server_conf_dir":                     "Containers.SLURM.Managed.DNSServerConfDir",
-   "dns_server_conf_template":                "Containers.SLURM.Managed.DNSServerConfTemplate",
-   "dns_server_reload_command":               "Containers.SLURM.Managed.DNSServerReloadCommand",
-   "dns_server_update_command":               "Containers.SLURM.Managed.DNSServerUpdateCommand",
-   "compute_node_domain":                     "Containers.SLURM.Managed.ComputeNodeDomain",
-   "compute_node_nameservers":                "Containers.SLURM.Managed.ComputeNodeNameservers",
-   "assign_node_hostname":                    "Containers.SLURM.Managed.AssignNodeHostname",
-   "enable_legacy_jobs_api":                  "Containers.JobsAPI.Enable",
-   "crunch_job_wrapper":                      "Containers.JobsAPI.CrunchJobWrapper",
-   "crunch_job_user":                         "Containers.JobsAPI.CrunchJobUser",
-   "crunch_refresh_trigger":                  "Containers.JobsAPI.CrunchRefreshTrigger",
-   "git_internal_dir":                        "Containers.JobsAPI.GitInternalDir",
-   "reuse_job_if_outputs_differ":             "Containers.JobsAPI.ReuseJobIfOutputsDiffer",
-   "default_docker_image_for_jobs":           "Containers.JobsAPI.DefaultDockerImage",
-   "mailchimp_api_key":                       "Mail.MailchimpAPIKey",
-   "mailchimp_list_id":                       "Mail.MailchimpListID",
-}
+def set_cfg cfg, k, v
+  # "foo.bar: baz" --> { config.foo.bar = baz }
+  ks = k.split '.'
+  k = ks.pop
+  ks.each do |kk|
+    cfg = cfg[kk]
+    if cfg.nil?
+      break
+    end
+  end
+  if !cfg.nil?
+    cfg[k] = v
+  end
+end
+
+$config_migrate_map = {}
+$config_types = {}
+def declare_config(assign_to, configtype, migrate_from=nil)
+  if migrate_from
+    $config_migrate_map[migrate_from] = ->(cfg, k, v) {
+      set_cfg cfg, assign_to, v
+    }
+  end
+  $config_types[assign_to] = configtype
+end
+
+module Boolean; end
+class TrueClass; include Boolean; end
+class FalseClass; include Boolean; end
+
+declare_config "ClusterID", String, :uuid_prefix
+declare_config "Git.Repositories", String, :git_repositories_dir
+declare_config "API.DisabledAPIs", Array, :disable_api_methods
+declare_config "API.MaxRequestSize", Integer, :max_request_size
+declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
+declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
+declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval
+declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
+declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
+declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
+declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
+declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
+declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
+declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
+declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address
+declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
+declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
+declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
+declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
+declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
+declare_config "Login.ProviderAppSecret", String, :sso_app_secret
+declare_config "Login.ProviderAppID", String, :sso_app_id
+declare_config "TLS.Insecure", Boolean, :sso_insecure
+declare_config "Services.SSO.ExternalURL", String, :sso_provider_url
+declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
+declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
+declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
+declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
+declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
+declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
+declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
+declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
+declare_config "Collections.BlobSigningKey", String, :blob_signing_key
+declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
+declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
+declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
+declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
+declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
+declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
+declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances
+declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
+declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
+declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
+declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
+declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
+declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
+declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
+declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
+declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
+declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
+declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
+declare_config "Containers.SLURM.Managed.DNSServerConfDir", String, :dns_server_conf_dir
+declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", String, :dns_server_conf_template
+declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
+declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
+declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
+declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
+declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api
+declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
+declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
+declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
+declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
+declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
+declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
+declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
+declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
+declare_config "Services.Workbench1.ExternalURL", String, :workbench_address
+declare_config "Services.Websocket.ExternalURL", String, :websocket_address
+declare_config "Services.WebDAV.ExternalURL", String, :keep_web_service_url
+declare_config "Services.GitHTTP.ExternalURL", String, :git_repo_https_base
+declare_config "Services.GitSSH.ExternalURL", String, :git_repo_ssh_base
 
 application_config = {}
 %w(application.default application).each do |cfgfile|
@@ -123,13 +158,16 @@ application_config = {}
 end
 
 application_config.each do |k, v|
-  cfg = $arvados_config
-
-  if config_key_map[k.to_sym]
-     k = config_key_map[k.to_sym]
+  if $config_migrate_map[k.to_sym]
+    $config_migrate_map[k.to_sym].call $arvados_config, k, v
+  else
+    set_cfg $arvados_config, k, v
   end
+end
 
-  # "foo.bar: baz" --> { config.foo.bar = baz }
+$config_types.each do |cfgkey, cfgtype|
+  cfg = $arvados_config
+  k = cfgkey
   ks = k.split '.'
   k = ks.pop
   ks.each do |kk|
@@ -138,12 +176,25 @@ application_config.each do |k, v|
       break
     end
   end
-  if !cfg.nil?
-    cfg[k] = v
+  if cfgtype == String and !cfg[k]
+    cfg[k] = ""
+  end
+  if cfgtype == ActiveSupport::Duration
+    if cfg[k].is_a? Integer
+      cfg[k] = cfg[k].seconds
+    elsif cfg[k].is_a? String
+      # TODO handle suffixes
+    end
   end
-end
 
-puts $arvados_config.to_yaml
+  if cfg.nil?
+    raise "missing #{cfgkey}"
+  end
+
+  if !cfg[k].is_a? cfgtype
+    raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
+  end
+end
 
 Server::Application.configure do
   nils = []
diff --git a/services/api/config/initializers/lograge.rb b/services/api/config/initializers/lograge.rb
index ef4e428bf..07dba3aef 100644
--- a/services/api/config/initializers/lograge.rb
+++ b/services/api/config/initializers/lograge.rb
@@ -38,8 +38,8 @@ Server::Application.configure do
     end
 
     params_s = SafeJSON.dump(params)
-    if params_s.length > Rails.configuration.max_request_log_params_size
-      payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
+    if params_s.length > Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]
+      payload[:params_truncated] = params_s[0..Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]] + "[...]"
     else
       payload[:params] = params
     end
diff --git a/services/api/config/initializers/omniauth_init.rb b/services/api/config/initializers/omniauth_init.rb
index b5e98943d..5610999a9 100644
--- a/services/api/config/initializers/omniauth_init.rb
+++ b/services/api/config/initializers/omniauth_init.rb
@@ -9,15 +9,15 @@
 
 if defined? CUSTOM_PROVIDER_URL
   Rails.logger.warn "Copying omniauth from globals in legacy config file."
-  Rails.configuration.sso_app_id = APP_ID
-  Rails.configuration.sso_app_secret = APP_SECRET
-  Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL
+  Rails.configuration.Login["ProviderAppID"] = APP_ID
+  Rails.configuration.Login["ProviderAppSecret"] = APP_SECRET
+  Rails.configuration.Services["SSO"]["ExternalURL"] = CUSTOM_PROVIDER_URL
 else
   Rails.application.config.middleware.use OmniAuth::Builder do
     provider(:josh_id,
-             Rails.configuration.sso_app_id,
-             Rails.configuration.sso_app_secret,
-             Rails.configuration.sso_provider_url)
+             Rails.configuration.Login["ProviderAppID"],
+             Rails.configuration.Login["ProviderAppSecret"],
+             Rails.configuration.Services["SSO"]["ExternalURL"])
   end
   OmniAuth.config.on_failure = StaticController.action(:login_failure)
 end
diff --git a/services/api/lib/tasks/config_check.rake b/services/api/lib/tasks/config_check.rake
index 4f071f11a..c42c37edb 100644
--- a/services/api/lib/tasks/config_check.rake
+++ b/services/api/lib/tasks/config_check.rake
@@ -21,8 +21,8 @@ namespace :config do
       end
     end
     # default_trash_lifetime cannot be less than 24 hours
-    if Rails.configuration.default_trash_lifetime < 86400 then
-      raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime
+    if Rails.configuration.Collections["DefaultTrashLifetime"] < 86400 then
+      raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.Collections["DefaultTrashLifetime"]
     end
   end
 end

commit a6dab7c8e0c9eeb14a1c66f54a668dbb8c577d03
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date:   Thu Mar 21 12:38:09 2019 -0400

    13996: More config updates
    
    Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>

diff --git a/services/api/app/models/api_client_authorization.rb b/services/api/app/models/api_client_authorization.rb
index 38538cb4f..fc8ae5282 100644
--- a/services/api/app/models/api_client_authorization.rb
+++ b/services/api/app/models/api_client_authorization.rb
@@ -94,7 +94,7 @@ class ApiClientAuthorization < ArvadosModel
 
   def self.validate(token:, remote: nil)
     return nil if !token
-    remote ||= Rails.configuration.uuid_prefix
+    remote ||= Rails.configuration.ClusterID
 
     case token[0..2]
     when 'v2/'
@@ -134,7 +134,7 @@ class ApiClientAuthorization < ArvadosModel
       end
 
       uuid_prefix = uuid[0..4]
-      if uuid_prefix == Rails.configuration.uuid_prefix
+      if uuid_prefix == Rails.configuration.ClusterID
         # If the token were valid, we would have validated it above
         return nil
       elsif uuid_prefix.length != 5
@@ -153,7 +153,7 @@ class ApiClientAuthorization < ArvadosModel
       # [re]validate it.
       begin
         clnt = HTTPClient.new
-        if Rails.configuration.sso_insecure
+        if Rails.configuration.TLS["Insecure"]
           clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
         else
           # Use system CA certificates
@@ -164,7 +164,7 @@ class ApiClientAuthorization < ArvadosModel
         end
         remote_user = SafeJSON.load(
           clnt.get_content('https://' + host + '/arvados/v1/users/current',
-                           {'remote' => Rails.configuration.uuid_prefix},
+                           {'remote' => Rails.configuration.ClusterID},
                            {'Authorization' => 'Bearer ' + token}))
       rescue => e
         Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
@@ -187,7 +187,7 @@ class ApiClientAuthorization < ArvadosModel
           end
         end
 
-        if Rails.configuration.new_users_are_active ||
+        if Rails.configuration.Users["NewUsersAreActive"] ||
            Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
           # Update is_active to whatever it is at the remote end
           user.is_active = remote_user['is_active']
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index 6147b79f9..9e9ae5635 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -120,7 +120,7 @@ class Collection < ArvadosModel
             # Signature provided, but verify_signature did not like it.
             logger.warn "Invalid signature on locator #{tok}"
             raise ArvadosModel::PermissionDeniedError
-          elsif Rails.configuration.permit_create_collection_with_unsigned_manifest
+          elsif Rails.configuration.Collections["BlobSigning"]
             # No signature provided, but we are running in insecure mode.
             logger.debug "Missing signature on locator #{tok} ignored"
           elsif Blob.new(tok).empty?
@@ -304,9 +304,9 @@ class Collection < ArvadosModel
   end
 
   def should_preserve_version?
-    return false unless (Rails.configuration.collection_versioning && versionable_updates?(self.changes.keys))
+    return false unless (Rails.configuration.Collections["CollectionVersioning"] && versionable_updates?(self.changes.keys))
 
-    idle_threshold = Rails.configuration.preserve_version_if_idle
+    idle_threshold = Rails.configuration.Collections["PreserveVersionIfIdle"]
     if !self.preserve_version_was &&
       (idle_threshold < 0 ||
         (idle_threshold > 0 && self.modified_at_was > db_current_time-idle_threshold.seconds))
@@ -354,7 +354,7 @@ class Collection < ArvadosModel
       return manifest_text
     else
       token = Thread.current[:token]
-      exp = [db_current_time.to_i + Rails.configuration.blob_signature_ttl,
+      exp = [db_current_time.to_i + Rails.configuration.Collections["BlobSigningTTL"],
              trash_at].compact.map(&:to_i).min
       self.class.sign_manifest manifest_text, token, exp
     end
@@ -362,7 +362,7 @@ class Collection < ArvadosModel
 
   def self.sign_manifest manifest, token, exp=nil
     if exp.nil?
-      exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+      exp = db_current_time.to_i + Rails.configuration.Collections["BlobSigningTTL"]
     end
     signing_opts = {
       api_token: token,
@@ -472,7 +472,7 @@ class Collection < ArvadosModel
   #
   # If filter_compatible_format is true (the default), only return image
   # collections which are support by the installation as indicated by
-  # Rails.configuration.docker_image_formats.  Will follow
+  # Rails.configuration.Containers["SupportedDockerImageFormats"].  Will follow
   # 'docker_image_migration' links if search_term resolves to an incompatible
   # image, but an equivalent compatible image is available.
   def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil, filter_compatible_format: true)
@@ -483,15 +483,17 @@ class Collection < ArvadosModel
       joins("JOIN collections ON links.head_uuid = collections.uuid").
       order("links.created_at DESC")
 
-    if (Rails.configuration.docker_image_formats.include? 'v1' and
-        Rails.configuration.docker_image_formats.include? 'v2') or filter_compatible_format == false
+    docker_image_formats = Rails.configuration.Containers["SupportedDockerImageFormats"]
+
+    if (docker_image_formats.include? 'v1' and
+        docker_image_formats.include? 'v2') or filter_compatible_format == false
       pattern = /^(sha256:)?[0-9A-Fa-f]{64}\.tar$/
-    elsif Rails.configuration.docker_image_formats.include? 'v2'
+    elsif docker_image_formats.include? 'v2'
       pattern = /^(sha256:)[0-9A-Fa-f]{64}\.tar$/
-    elsif Rails.configuration.docker_image_formats.include? 'v1'
+    elsif docker_image_formats.include? 'v1'
       pattern = /^[0-9A-Fa-f]{64}\.tar$/
     else
-      raise "Unrecognized configuration for docker_image_formats #{Rails.configuration.docker_image_formats}"
+      raise "Unrecognized configuration for docker_image_formats #{docker_image_formats}"
     end
 
     # If the search term is a Collection locator that contains one file
diff --git a/services/api/app/models/commit_ancestor.rb b/services/api/app/models/commit_ancestor.rb
index 3d5152c3f..60798f103 100644
--- a/services/api/app/models/commit_ancestor.rb
+++ b/services/api/app/models/commit_ancestor.rb
@@ -17,7 +17,7 @@ class CommitAncestor < ActiveRecord::Base
   protected
 
   def ask_git_whether_is
-    @gitdirbase = Rails.configuration.git_repositories_dir
+    @gitdirbase = Rails.configuration.Git["Repositories"]
     self.is = nil
     Dir.foreach @gitdirbase do |repo|
       next if repo.match(/^\./)
diff --git a/services/api/app/models/container.rb b/services/api/app/models/container.rb
index abcfdbd29..cd6ac08fd 100644
--- a/services/api/app/models/container.rb
+++ b/services/api/app/models/container.rb
@@ -201,7 +201,7 @@ class Container < ArvadosModel
     rc = {}
     defaults = {
       'keep_cache_ram' =>
-      Rails.configuration.container_default_keep_cache_ram,
+      Rails.configuration.Containers["DefaultKeepCacheRAM"],
     }
     defaults.merge(runtime_constraints).each do |k, v|
       if v.is_a? Array
@@ -364,7 +364,7 @@ class Container < ArvadosModel
     transaction do
       reload(lock: 'FOR UPDATE')
       check_unlock_fail
-      if self.lock_count < Rails.configuration.max_container_dispatch_attempts
+      if self.lock_count < Rails.configuration.Containers["MaxDispatchAttempts"]
         update_attributes!(state: Queued)
       else
         update_attributes!(state: Cancelled,
diff --git a/services/api/app/models/container_request.rb b/services/api/app/models/container_request.rb
index 921d4bee6..c6c347e27 100644
--- a/services/api/app/models/container_request.rb
+++ b/services/api/app/models/container_request.rb
@@ -192,7 +192,7 @@ class ContainerRequest < ArvadosModel
     self.runtime_constraints ||= {}
     self.mounts ||= {}
     self.cwd ||= "."
-    self.container_count_max ||= Rails.configuration.container_count_max
+    self.container_count_max ||= Rails.configuration.Containers["MaxRetryAttempts"]
     self.scheduling_parameters ||= {}
     self.output_ttl ||= 0
     self.priority ||= 0
@@ -248,7 +248,7 @@ class ContainerRequest < ArvadosModel
     if self.state == Committed
       # If preemptible instances (eg: AWS Spot Instances) are allowed,
       # ask them on child containers by default.
-      if Rails.configuration.preemptible_instances and !c.nil? and
+      if Rails.configuration.Containers["UsePreemptibleInstances"] and !c.nil? and
         self.scheduling_parameters['preemptible'].nil?
           self.scheduling_parameters['preemptible'] = true
       end
@@ -318,7 +318,7 @@ class ContainerRequest < ArvadosModel
             scheduling_parameters['partitions'].size)
             errors.add :scheduling_parameters, "partitions must be an array of strings"
       end
-      if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible']
+      if !Rails.configuration.Containers["UsePreemptibleInstances"] and scheduling_parameters['preemptible']
         errors.add :scheduling_parameters, "preemptible instances are not allowed"
       end
       if scheduling_parameters.include? 'max_run_time' and
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index 3d8b91b4b..152ecfbc7 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -35,7 +35,7 @@ class Node < ArvadosModel
   api_accessible :superuser, :extend => :user do |t|
     t.add :first_ping_at
     t.add :info
-    t.add lambda { |x| Rails.configuration.compute_node_nameservers }, :as => :nameservers
+    t.add lambda { |x| Rails.configuration.Containers["SLURM"]["Managed"]["ComputeNodeNameservers"] }, :as => :nameservers
   end
 
   after_initialize do
@@ -43,7 +43,7 @@ class Node < ArvadosModel
   end
 
   def domain
-    super || Rails.configuration.compute_node_domain
+    super || Rails.configuration.Containers["SLURM"]["Managed"]["ComputeNodeDomain"]
   end
 
   def api_job_uuid
@@ -139,7 +139,7 @@ class Node < ArvadosModel
   protected
 
   def assign_hostname
-    if self.hostname.nil? and Rails.configuration.assign_node_hostname
+    if self.hostname.nil? and Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"]
       self.hostname = self.class.hostname_for_slot(self.slot_number)
     end
   end
@@ -155,7 +155,7 @@ class Node < ArvadosModel
                           # query label:
                           'Node.available_slot_number',
                           # [col_id, val] for $1 vars:
-                          [[nil, Rails.configuration.max_compute_nodes]],
+                          [[nil, Rails.configuration.Containers["MaxComputeVMs"]]],
                          ).rows.first.andand.first
   end
 
@@ -190,24 +190,24 @@ class Node < ArvadosModel
 
     template_vars = {
       hostname: hostname,
-      uuid_prefix: Rails.configuration.uuid_prefix,
+      uuid_prefix: Rails.configuration.ClusterID,
       ip_address: ip_address,
       ptr_domain: ptr_domain,
     }
 
-    if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template
+    if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]
       tmpfile = nil
       begin
         begin
-          template = IO.read(Rails.configuration.dns_server_conf_template)
+          template = IO.read(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"])
         rescue IOError, SystemCallError => e
-          logger.error "Reading #{Rails.configuration.dns_server_conf_template}: #{e.message}"
+          logger.error "Reading #{Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]}: #{e.message}"
           raise
         end
 
-        hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+        hostfile = File.join Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], "#{hostname}.conf"
         Tempfile.open(["#{hostname}-", ".conf.tmp"],
-                                 Rails.configuration.dns_server_conf_dir) do |f|
+                                 Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"]) do |f|
           tmpfile = f.path
           f.puts template % template_vars
         end
@@ -223,20 +223,20 @@ class Node < ArvadosModel
       end
     end
 
-    if Rails.configuration.dns_server_update_command
-      cmd = Rails.configuration.dns_server_update_command % template_vars
+    if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"]
+      cmd = Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] % template_vars
       if not system cmd
         logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}"
         ok = false
       end
     end
 
-    if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_reload_command
-      restartfile = File.join(Rails.configuration.dns_server_conf_dir, 'restart.txt')
+    if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"]
+      restartfile = File.join(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], 'restart.txt')
       begin
         File.open(restartfile, 'w') do |f|
           # Typically, this is used to trigger a dns server restart
-          f.puts Rails.configuration.dns_server_reload_command
+          f.puts Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"]
         end
       rescue IOError, SystemCallError => e
         logger.error "Unable to write #{restartfile}: #{e.message}"
@@ -248,7 +248,7 @@ class Node < ArvadosModel
   end
 
   def self.hostname_for_slot(slot_number)
-    config = Rails.configuration.assign_node_hostname
+    config = Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"]
 
     return nil if !config
 
@@ -257,10 +257,13 @@ class Node < ArvadosModel
 
   # At startup, make sure all DNS entries exist.  Otherwise, slurmctld
   # will refuse to start.
-  if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template and Rails.configuration.assign_node_hostname
-    (0..Rails.configuration.max_compute_nodes-1).each do |slot_number|
+  if (Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and
+      Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"] and
+      Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"])
+
+    (0..Rails.configuration.Containers["MaxComputeVMs"]-1).each do |slot_number|
       hostname = hostname_for_slot(slot_number)
-      hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+      hostfile = File.join Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], "#{hostname}.conf"
       if !File.exist? hostfile
         n = Node.where(:slot_number => slot_number).first
         if n.nil? or n.ip_address.nil?
diff --git a/services/api/app/views/admin_notifier/new_user.text.erb b/services/api/app/views/admin_notifier/new_user.text.erb
index d21513f7f..20a36afcb 100644
--- a/services/api/app/views/admin_notifier/new_user.text.erb
+++ b/services/api/app/views/admin_notifier/new_user.text.erb
@@ -4,7 +4,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
 
 <%
   add_to_message = ''
-  if Rails.configuration.auto_setup_new_users
+  if Rails.configuration.Users["AutoSetupNewUsers"]
     add_to_message = @user.is_invited ? ' and setup' : ', but not setup'
   end
 %>
@@ -22,4 +22,3 @@ Please see workbench for more information:
 <% end -%>
 Thanks,
 Your friendly Arvados robot.
-
diff --git a/services/api/lib/audit_logs.rb b/services/api/lib/audit_logs.rb
index 56fd935f3..4116ae0df 100644
--- a/services/api/lib/audit_logs.rb
+++ b/services/api/lib/audit_logs.rb
@@ -44,8 +44,8 @@ module AuditLogs
   end
 
   def self.tidy_in_background
-    max_age = Rails.configuration.max_audit_log_age
-    max_batch = Rails.configuration.max_audit_log_delete_batch
+    max_age = Rails.configuration.AuditLogs["MaxAge"]
+    max_batch = Rails.configuration.AuditLogs["MaxDeleteBatch"]
     return if max_age <= 0 || max_batch <= 0
 
     exp = (max_age/14).seconds
diff --git a/services/api/lib/crunch_dispatch.rb b/services/api/lib/crunch_dispatch.rb
index 449d7d516..eceada5a7 100644
--- a/services/api/lib/crunch_dispatch.rb
+++ b/services/api/lib/crunch_dispatch.rb
@@ -31,13 +31,13 @@ class CrunchDispatch
     @cgroup_root = ENV['CRUNCH_CGROUP_ROOT']
     @srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT']
 
-    @arvados_internal = Rails.configuration.git_internal_dir
+    @arvados_internal = Rails.configuration.Containers["JobsAPI"]["GitInternalDir"]
     if not File.exist? @arvados_internal
       $stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
       raise "No internal git repository available" unless ($? == 0)
     end
 
-    @repo_root = Rails.configuration.git_repositories_dir
+    @repo_root = Rails.configuration.Git["Repositories"]
     @arvados_repo_path = Repository.where(name: "arvados").first.server_path
     @authorizations = {}
     @did_recently = {}
@@ -460,7 +460,7 @@ class CrunchDispatch
         bytes_logged: 0,
         events_logged: 0,
         log_throttle_is_open: true,
-        log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
+        log_throttle_reset_time: Time.now + Rails.configuration.Containers["Logging"]["LogThrottlePeriod"],
         log_throttle_bytes_so_far: 0,
         log_throttle_lines_so_far: 0,
         log_throttle_bytes_skipped: 0,
@@ -485,7 +485,7 @@ class CrunchDispatch
       matches = line.match(/^\S+ \S+ \d+ \d+ stderr (.*)/)
       if matches and matches[1] and matches[1].start_with?('[...]') and matches[1].end_with?('[...]')
         partial_line = true
-        if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.crunch_log_partial_line_throttle_period
+        if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.Containers["Logging"]["LogPartialLineThrottlePeriod"]
           running_job[:log_throttle_partial_line_last_at] = Time.now
         else
           skip_counts = true
@@ -499,26 +499,26 @@ class CrunchDispatch
       end
 
       if (running_job[:bytes_logged] >
-          Rails.configuration.crunch_limit_log_bytes_per_job)
-        message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
+          Rails.configuration.Containers["Logging"]["LimitLogBytesPerJob"])
+        message = "Exceeded log limit #{Rails.configuration.Containers["Logging"]["LimitLogBytesPerJob"]} bytes (LimitLogBytesPerJob). Log will be truncated."
         running_job[:log_throttle_reset_time] = Time.now + 100.years
         running_job[:log_throttle_is_open] = false
 
       elsif (running_job[:log_throttle_bytes_so_far] >
-             Rails.configuration.crunch_log_throttle_bytes)
+             Rails.configuration.Containers["Logging"]["LogThrottleBytes"])
         remaining_time = running_job[:log_throttle_reset_time] - Time.now
-        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds."
+        message = "Exceeded rate #{Rails.configuration.Containers["Logging"]["LogThrottleBytes"]} bytes per #{Rails.configuration.Containers["Logging"]["LogThrottlePeriod"]} seconds (LogThrottleBytes). Logging will be silenced for the next #{remaining_time.round} seconds."
         running_job[:log_throttle_is_open] = false
 
       elsif (running_job[:log_throttle_lines_so_far] >
-             Rails.configuration.crunch_log_throttle_lines)
+             Rails.configuration.Containers["Logging"]["LogThrottleLines"])
         remaining_time = running_job[:log_throttle_reset_time] - Time.now
-        message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds."
+        message = "Exceeded rate #{Rails.configuration.Containers["Logging"]["LogThrottleLines"]} lines per #{Rails.configuration.Containers["Logging"]["LogThrottlePeriod"]} seconds (LogThrottleLines), logging will be silenced for the next #{remaining_time.round} seconds."
         running_job[:log_throttle_is_open] = false
 
       elsif partial_line and running_job[:log_throttle_first_partial_line]
         running_job[:log_throttle_first_partial_line] = false
-        message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.crunch_log_partial_line_throttle_period} seconds."
+        message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.Containers["Logging"]["LogPartialLineThrottlePeriod"]} seconds."
       end
     end
 
@@ -552,7 +552,7 @@ class CrunchDispatch
           j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
         end
 
-        j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
+        j[:log_throttle_reset_time] = now + Rails.configuration.Containers["Logging"]["LogThrottlePeriod"]
         j[:log_throttle_bytes_so_far] = 0
         j[:log_throttle_lines_so_far] = 0
         j[:log_throttle_bytes_skipped] = 0
@@ -592,7 +592,7 @@ class CrunchDispatch
         bufend = ''
         streambuf.each_line do |line|
           if not line.end_with? $/
-            if line.size > Rails.configuration.crunch_log_throttle_bytes
+            if line.size > Rails.configuration.Containers["Logging"]["LogThrottleBytes"]
               # Without a limit here, we'll use 2x an arbitrary amount
               # of memory, and waste a lot of time copying strings
               # around, all without providing any feedback to anyone
@@ -775,7 +775,7 @@ class CrunchDispatch
 
     # This is how crunch-job child procs know where the "refresh"
     # trigger file is
-    ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
+    ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"]
 
     # If salloc can't allocate resources immediately, make it use our
     # temporary failure exit code.  This ensures crunch-dispatch won't
@@ -937,8 +937,8 @@ class CrunchDispatch
     # Send out to log event if buffer size exceeds the bytes per event or if
     # it has been at least crunch_log_seconds_between_events seconds since
     # the last flush.
-    if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
-        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
+    if running_job[:stderr_buf_to_flush].size > Rails.configuration.Containers["Logging"]["LogBytesPerEvent"] or
+        (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.Containers["Logging"]["LogSecondsBetweenEvents"]
       begin
         log = Log.new(object_uuid: running_job[:job].uuid,
                       event_type: 'stderr',
@@ -957,7 +957,7 @@ class CrunchDispatch
 
   # An array of job_uuids in squeue
   def squeue_jobs
-    if Rails.configuration.crunch_job_wrapper == :slurm_immediate
+    if Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"].to_sym == :slurm_immediate
       p = IO.popen(['squeue', '-a', '-h', '-o', '%j'])
       begin
         p.readlines.map {|line| line.strip}
diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb
index d99edd801..97476a09c 100644
--- a/services/api/lib/enable_jobs_api.rb
+++ b/services/api/lib/enable_jobs_api.rb
@@ -31,9 +31,9 @@ Disable_jobs_api_method_list = ["jobs.create",
                                                "job_tasks.show"]
 
 def check_enable_legacy_jobs_api
-  if Rails.configuration.enable_legacy_jobs_api == false ||
-     (Rails.configuration.enable_legacy_jobs_api == "auto" &&
+  if Rails.configuration.Containers["JobsAPI"]["Enable"] == false ||
+     (Rails.configuration.Containers["JobsAPI"]["Enable"] == "auto" &&
       ActiveRecord::Base.connection.exec_query("select count(*) from jobs").first["count"] == "0")
-    Rails.configuration.disable_api_methods = Disable_jobs_api_method_list
+    Rails.configuration.API["DisabledAPIs"] = Disable_jobs_api_method_list
   end
 end
diff --git a/services/api/lib/josh_id.rb b/services/api/lib/josh_id.rb
index bb6c1f48a..396d72444 100644
--- a/services/api/lib/josh_id.rb
+++ b/services/api/lib/josh_id.rb
@@ -40,7 +40,7 @@ module OmniAuth
         options.client_options[:site] = options[:custom_provider_url]
         options.client_options[:authorize_url] = "#{options[:custom_provider_url]}/auth/josh_id/authorize"
         options.client_options[:access_token_url] = "#{options[:custom_provider_url]}/auth/josh_id/access_token"
-        if Rails.configuration.sso_insecure
+        if Rails.configuration.TLS["Insecure"]
           options.client_options[:ssl] = {verify_mode: OpenSSL::SSL::VERIFY_NONE}
         end
         ::OAuth2::Client.new(options.client_id, options.client_secret, deep_symbolize(options.client_options))
diff --git a/services/api/lib/load_param.rb b/services/api/lib/load_param.rb
index e7cb21fc7..51b951571 100644
--- a/services/api/lib/load_param.rb
+++ b/services/api/lib/load_param.rb
@@ -56,7 +56,7 @@ module LoadParam
         raise ArgumentError.new("Invalid value for limit parameter")
       end
       @limit = [params[:limit].to_i,
-                Rails.configuration.max_items_per_response].min
+                Rails.configuration.API["MaxItemsPerResponse"]].min
     else
       @limit = DEFAULT_LIMIT
     end
diff --git a/services/api/lib/log_reuse_info.rb b/services/api/lib/log_reuse_info.rb
index ed5cc82bf..01cf6dd78 100644
--- a/services/api/lib/log_reuse_info.rb
+++ b/services/api/lib/log_reuse_info.rb
@@ -9,7 +9,7 @@ module LogReuseInfo
   # doing expensive things like database queries, and we want to skip
   # those when logging is disabled.
   def log_reuse_info(candidates=nil)
-    if Rails.configuration.log_reuse_decisions
+    if Rails.configuration.Containers["LogReuseDecisions"]
       msg = yield
       if !candidates.nil?
         msg = "have #{candidates.count} candidates " + msg
diff --git a/services/api/lib/refresh_permission_view.rb b/services/api/lib/refresh_permission_view.rb
index 25be3c08d..e7fa263c7 100644
--- a/services/api/lib/refresh_permission_view.rb
+++ b/services/api/lib/refresh_permission_view.rb
@@ -12,8 +12,8 @@ def do_refresh_permission_view
 end
 
 def refresh_permission_view(async=false)
-  if async and Rails.configuration.async_permissions_update_interval > 0
-    exp = Rails.configuration.async_permissions_update_interval.seconds
+  if async and Rails.configuration.API["AsyncPermissionsUpdateInterval"] > 0
+    exp = Rails.configuration.API["AsyncPermissionsUpdateInterval"].seconds
     need = false
     Rails.cache.fetch('AsyncRefreshPermissionView', expires_in: exp) do
       need = true
diff --git a/services/api/lib/sweep_trashed_objects.rb b/services/api/lib/sweep_trashed_objects.rb
index bedbd68a4..6ade1fc56 100644
--- a/services/api/lib/sweep_trashed_objects.rb
+++ b/services/api/lib/sweep_trashed_objects.rb
@@ -55,8 +55,8 @@ module SweepTrashedObjects
   end
 
   def self.sweep_if_stale
-    return if Rails.configuration.trash_sweep_interval <= 0
-    exp = Rails.configuration.trash_sweep_interval.seconds
+    return if Rails.configuration.Collections["TrashSweepInterval"] <= 0
+    exp = Rails.configuration.Collections["TrashSweepInterval"].seconds
     need = false
     Rails.cache.fetch('SweepTrashedObjects', expires_in: exp) do
       need = true
diff --git a/services/api/lib/tasks/delete_old_container_logs.rake b/services/api/lib/tasks/delete_old_container_logs.rake
index b45113e8a..c926c2ae0 100644
--- a/services/api/lib/tasks/delete_old_container_logs.rake
+++ b/services/api/lib/tasks/delete_old_container_logs.rake
@@ -11,7 +11,7 @@ namespace :db do
   desc "Remove old container log entries from the logs table"
 
   task delete_old_container_logs: :environment do
-    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.clean_container_log_rows_after} seconds')"
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers["Logging"]["MaxAge"]} seconds')"
 
     ActiveRecord::Base.connection.execute(delete_sql)
   end
diff --git a/services/api/lib/tasks/delete_old_job_logs.rake b/services/api/lib/tasks/delete_old_job_logs.rake
index dcd92b19b..327f663b2 100644
--- a/services/api/lib/tasks/delete_old_job_logs.rake
+++ b/services/api/lib/tasks/delete_old_job_logs.rake
@@ -9,7 +9,7 @@
 namespace :db do
   desc "Remove old job stderr entries from the logs table"
   task delete_old_job_logs: :environment do
-    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.clean_job_log_rows_after} seconds')"
+    delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers["Logging"]["MaxAge"]} seconds')"
 
     ActiveRecord::Base.connection.execute(delete_sql)
   end
diff --git a/services/api/lib/trashable.rb b/services/api/lib/trashable.rb
index 968796296..4981b8cd3 100644
--- a/services/api/lib/trashable.rb
+++ b/services/api/lib/trashable.rb
@@ -50,7 +50,7 @@ module Trashable
       if trash_at.nil?
         self.delete_at = nil
       else
-        self.delete_at = trash_at + Rails.configuration.default_trash_lifetime.seconds
+        self.delete_at = trash_at + Rails.configuration.Collections["DefaultTrashLifetime"].seconds
       end
     elsif !trash_at || !delete_at || trash_at > delete_at
       # Not trash, or bogus arguments? Just validate in
@@ -65,7 +65,7 @@ module Trashable
       earliest_delete = [
         @validation_timestamp,
         trash_at_was,
-      ].compact.min + Rails.configuration.blob_signature_ttl.seconds
+      ].compact.min + Rails.configuration.Collections["BlobSigningTTL"].seconds
 
       # The previous value of delete_at is also an upper bound on the
       # longest-lived permission token. For example, if TTL=14,
@@ -96,7 +96,7 @@ module TrashableController
       @object.update_attributes!(trash_at: db_current_time)
     end
     earliest_delete = (@object.trash_at +
-                       Rails.configuration.blob_signature_ttl.seconds)
+                       Rails.configuration.Collections["BlobSigningTTL"].seconds)
     if @object.delete_at > earliest_delete
       @object.update_attributes!(delete_at: earliest_delete)
     end

-----------------------------------------------------------------------


hooks/post-receive
-- 




More information about the arvados-commits mailing list