[ARVADOS] created: 1.3.0-728-g023015bac
Git user
git at public.curoverse.com
Mon Apr 8 15:11:29 UTC 2019
at 023015bacfded08503d0240d3f71838204d8dcb7 (commit)
commit 023015bacfded08503d0240d3f71838204d8dcb7
Author: Ward Vandewege <wvandewege at veritasgenetics.com>
Date: Fri Apr 5 13:09:00 2019 -0400
15042: handle config.default.yml in our packaging.
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <wvandewege at veritasgenetics.com>
diff --git a/build/run-library.sh b/build/run-library.sh
index 1daceff23..01a6a06c1 100755
--- a/build/run-library.sh
+++ b/build/run-library.sh
@@ -352,6 +352,15 @@ handle_rails_package() {
if [[ "$pkgname" != "arvados-workbench" ]]; then
exclude_list+=('config/database.yml')
fi
+ # for arvados-api-server, we need to dereference the
+ # config/config.default.yml symlink. There is no fpm way to do that
+ # (excluding the existing symlink and then adding the file from its source
+ # path doesn't work, sadly).
+ if [[ "$pkgname" == "arvados-api-server" ]]; then
+ mv /arvados/services/api/config/config.default.yml /arvados/services/api/config/config.default.yml.bu
+ cp -p /arvados/lib/config/config.default.yml /arvados/services/api/config/
+ exclude_list+=('config/config.default.yml.bu')
+ fi
for exclude in ${exclude_list[@]}; do
switches+=(-x "$exclude_root/$exclude")
done
@@ -359,6 +368,11 @@ handle_rails_package() {
-x "$exclude_root/vendor/cache-*" \
-x "$exclude_root/vendor/bundle" "$@" "$license_arg"
rm -rf "$scripts_dir"
+ # Undo the dereferencing we did above
+ if [[ "$pkgname" == "arvados-api-server" ]]; then
+ rm -f /arvados/services/api/config/config.default.yml
+ mv /arvados/services/api/config/config.default.yml.bu /arvados/services/api/config/config.default.yml
+ fi
}
# Build python packages with a virtualenv built-in
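The hunk above works around fpm's inability to dereference the config/config.default.yml symlink by swapping in a copy of the target before packaging and restoring the symlink afterwards. A minimal Ruby sketch of the same swap-and-restore idea (the with_dereferenced_symlink helper is hypothetical and not part of run-library.sh, which does this in shell):

    require 'fileutils'

    # Temporarily replace a symlink with a plain copy of its target, then restore it.
    def with_dereferenced_symlink(link_path)
      target = File.realpath(link_path)                # resolve e.g. ../../../lib/config/config.default.yml
      FileUtils.mv(link_path, "#{link_path}.bu")       # keep the original symlink as a backup
      FileUtils.cp(target, link_path, preserve: true)  # put a real file in its place
      yield                                            # run the packaging step here
    ensure
      FileUtils.rm_f(link_path)                        # drop the copied file
      FileUtils.mv("#{link_path}.bu", link_path)       # restore the symlink
    end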
commit 234878d253910806a12f599cef8484c8890e7ef9
Author: Ward Vandewege <wvandewege at veritasgenetics.com>
Date: Fri Apr 5 13:07:00 2019 -0400
13996: rename config.defaults.yml to config.default.yml, to remain consistent
with our existing default files.
Arvados-DCO-1.1-Signed-off-by: Ward Vandewege <wvandewege at veritasgenetics.com>
diff --git a/lib/config/config.defaults.yml b/lib/config/config.default.yml
similarity index 99%
rename from lib/config/config.defaults.yml
rename to lib/config/config.default.yml
index 3da6dc803..d46c5c0ae 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.default.yml
@@ -8,7 +8,7 @@
# The order of precedence (highest to lowest):
# 1. Legacy component-specific config files (deprecated)
# 2. /etc/arvados/config.yml
-# 3. config.defaults.yml
+# 3. config.default.yml
Clusters:
xxxxx:
diff --git a/services/api/config/arvados_config.rb b/services/api/config/arvados_config.rb
index b0ac7c0be..219455e32 100644
--- a/services/api/config/arvados_config.rb
+++ b/services/api/config/arvados_config.rb
@@ -3,7 +3,8 @@
# SPDX-License-Identifier: AGPL-3.0
#
-# Load Arvados configuration from /etc/arvados/config.yml, using defaults from config.defaults.yml
+# Load Arvados configuration from /etc/arvados/config.yml, using defaults
+# from config.default.yml
#
# Existing application.yml is migrated into the new config structure.
# Keys in the legacy application.yml take precedence.
@@ -43,7 +44,7 @@ end
$arvados_config = {}
-["#{::Rails.root.to_s}/config/config.defaults.yml", "/etc/arvados/config.yml"].each do |path|
+["#{::Rails.root.to_s}/config/config.default.yml", "/etc/arvados/config.yml"].each do |path|
if File.exist? path
confs = YAML.load(IO.read(path), deserialize_symbols: false)
if confs
diff --git a/services/api/config/config.defaults.yml b/services/api/config/config.defaults.yml
deleted file mode 120000
index 3a43d4bcd..000000000
--- a/services/api/config/config.defaults.yml
+++ /dev/null
@@ -1 +0,0 @@
-../../../lib/config/config.defaults.yml
\ No newline at end of file
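As the comment above says, config.default.yml supplies the lowest-precedence values and /etc/arvados/config.yml overrides them (legacy component files, handled separately, override both). A simplified Ruby sketch of that layering; YAML.safe_load stands in for the patched loader arvados_config.rb actually uses:

    require 'yaml'
    require 'active_support/core_ext/hash/deep_merge'

    config = {}
    # Lowest precedence first; each later file deep-merges over the previous one.
    ["config.default.yml", "/etc/arvados/config.yml"].each do |path|
      next unless File.exist?(path)
      confs = YAML.safe_load(IO.read(path)) || {}
      config.deep_merge!(confs)
    end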
commit 6e9fcde0422b33d081e2985975e3104eb2434957
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Wed Apr 3 16:41:58 2019 -0400
13996: Can now use database info from config.yml
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/arvados_config.rb
similarity index 85%
rename from services/api/config/initializers/load_config.rb
rename to services/api/config/arvados_config.rb
index 8bed5c655..b0ac7c0be 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/arvados_config.rb
@@ -2,6 +2,19 @@
#
# SPDX-License-Identifier: AGPL-3.0
+#
+# Load Arvados configuration from /etc/arvados/config.yml, using defaults from config.defaults.yml
+#
+# Existing application.yml is migrated into the new config structure.
+# Keys in the legacy application.yml take precedence.
+#
+# Use "bundle exec config:dump" to get the complete active configuration
+#
+# Use "bundle exec config:migrate" to migrate application.yml and
+# database.yml to config.yml. After adding the output of
+# config:migrate to /etc/arvados/config.yml, you will be able to
+# delete application.yml and database.yml.
+
require 'config_loader'
begin
@@ -36,7 +49,7 @@ $arvados_config = {}
if confs
clusters = confs["Clusters"].first
$arvados_config["ClusterID"] = clusters[0]
- $arvados_config.merge!(clusters[1])
+ $arvados_config.deep_merge!(clusters[1])
end
end
end
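The merge! to deep_merge! change matters because the cluster configuration is nested: a shallow merge replaces whole subtrees instead of individual keys. A small worked example (deep_merge comes from ActiveSupport):

    require 'active_support/core_ext/hash/deep_merge'

    defaults = { "API" => { "MaxRequestSize" => 134217728, "MaxItemsPerResponse" => 1000 } }
    site     = { "API" => { "MaxItemsPerResponse" => 500 } }

    defaults.merge(site)       #=> {"API"=>{"MaxItemsPerResponse"=>500}}  (MaxRequestSize lost)
    defaults.deep_merge(site)  #=> {"API"=>{"MaxRequestSize"=>134217728, "MaxItemsPerResponse"=>500}}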
@@ -154,17 +167,17 @@ application_config = {}
confs = YAML.load(yaml, deserialize_symbols: true)
# Ignore empty YAML file:
next if confs == false
- application_config.merge!(confs['common'] || {})
- application_config.merge!(confs[::Rails.env.to_s] || {})
+ application_config.deep_merge!(confs['common'] || {})
+ application_config.deep_merge!(confs[::Rails.env.to_s] || {})
end
end
db_config = {}
-path = "#{::Rails.root.to_s}/config/database.ymlx"
+path = "#{::Rails.root.to_s}/config/database.yml"
if File.exist? path
yaml = ERB.new(IO.read path).result(binding)
confs = YAML.load(yaml, deserialize_symbols: true)
- db_config.merge!(confs[::Rails.env.to_s] || {})
+ db_config.deep_merge!(confs[::Rails.env.to_s] || {})
end
$remaining_config = arvcfg.migrate_config(application_config, $arvados_config)
@@ -183,22 +196,29 @@ end
arvcfg.coercion_and_check $arvados_config
dbcfg.coercion_and_check $arvados_config
+#
+# Special case for test database, because the Arvados config.yml
+# doesn't have a concept of multiple rails environments.
+#
+if ::Rails.env.to_s == "test"
+ $arvados_config["PostgreSQL"]["Connection"]["DBName"] = "arvados_test"
+end
+
+dbhost = $arvados_config["PostgreSQL"]["Connection"]["Host"]
+if $arvados_config["PostgreSQL"]["Connection"]["Post"] != 0
+ dbhost += ":#{$arvados_config["PostgreSQL"]["Connection"]["Post"]}"
+end
+
+#
+# If DATABASE_URL is set, then ActiveRecord won't error out if database.yml doesn't exist.
+#
+# For config migration, we've previously populated the PostgreSQL
+# section of the config from database.yml
+#
+ENV["DATABASE_URL"] = "postgresql://#{$arvados_config["PostgreSQL"]["Connection"]["User"]}:#{$arvados_config["PostgreSQL"]["Connection"]["Password"]}@#{dbhost}/#{$arvados_config["PostgreSQL"]["Connection"]["DBName"]}?template=#{$arvados_config["PostgreSQL"]["Connection"]["Template"]}&encoding=#{$arvados_config["PostgreSQL"]["Connection"]["client_encoding"]}&pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"
+
Server::Application.configure do
ConfigLoader.copy_into_config $arvados_config, config
ConfigLoader.copy_into_config $remaining_config, config
config.secret_key_base = config.secret_token
-
- dbcfg = {}
- dbcfg[::Rails.env.to_s] = {
- adapter: 'postgresql',
- template: $arvados_config["PostgreSQL"]["Connection"]["Template"],
- encoding: $arvados_config["PostgreSQL"]["Connection"]["Encoding"],
- database: $arvados_config["PostgreSQL"]["Connection"]["DBName"],
- username: $arvados_config["PostgreSQL"]["Connection"]["User"],
- password: $arvados_config["PostgreSQL"]["Connection"]["Password"],
- host: $arvados_config["PostgreSQL"]["Connection"]["Host"],
- port: $arvados_config["PostgreSQL"]["Connection"]["Port"],
- pool: $arvados_config["PostgreSQL"]["ConnectionPool"]
- }
- Rails.application.config.database_configuration = dbcfg
end
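The block above turns the PostgreSQL section of the cluster config into a DATABASE_URL so ActiveRecord no longer needs database.yml. A condensed restatement of that mapping (encoding and template handling abbreviated for brevity):

    pg = $arvados_config["PostgreSQL"]["Connection"]
    dbhost = pg["Host"]
    dbhost += ":#{pg["Port"]}" if pg["Port"].to_i != 0   # omit the port when it is 0/unset

    ENV["DATABASE_URL"] =
      "postgresql://#{pg["User"]}:#{pg["Password"]}@#{dbhost}/#{pg["DBName"]}" \
      "?template=#{pg["Template"]}&pool=#{$arvados_config["PostgreSQL"]["ConnectionPool"]}"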
diff --git a/services/api/config/environment.rb b/services/api/config/environment.rb
index b82ba27f9..fbca77736 100644
--- a/services/api/config/environment.rb
+++ b/services/api/config/environment.rb
@@ -5,6 +5,7 @@
# Load the rails application
require_relative 'application'
require 'josh_id'
+require_relative 'arvados_config'
# Initialize the rails application
Rails.application.initialize!
diff --git a/services/api/config/initializers/legacy_jobs_api.rb b/services/api/config/initializers/legacy_jobs_api.rb
index 9ea6b2884..8f3b3cb5f 100644
--- a/services/api/config/initializers/legacy_jobs_api.rb
+++ b/services/api/config/initializers/legacy_jobs_api.rb
@@ -5,7 +5,6 @@
# Config must be done before we load files; otherwise they
# won't be able to use Rails.configuration.* to initialize their
# classes.
-require_relative 'load_config.rb'
require 'enable_jobs_api'
diff --git a/services/api/config/initializers/preload_all_models.rb b/services/api/config/initializers/preload_all_models.rb
index 0ab2b032a..713c61fd7 100644
--- a/services/api/config/initializers/preload_all_models.rb
+++ b/services/api/config/initializers/preload_all_models.rb
@@ -7,7 +7,6 @@
# Config must be done before we load model class files; otherwise they
# won't be able to use Rails.configuration.* to initialize their
# classes.
-require_relative 'load_config.rb'
if Rails.env == 'development'
Dir.foreach("#{Rails.root}/app/models") do |model_file|
diff --git a/services/api/lib/tasks/config_diff.rake b/services/api/lib/tasks/config_diff.rake
deleted file mode 100644
index 0fd6fdd46..000000000
--- a/services/api/lib/tasks/config_diff.rake
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (C) The Arvados Authors. All rights reserved.
-#
-# SPDX-License-Identifier: AGPL-3.0
-
-def diff_hash base, final
- diffed = {}
- base.each do |k,v|
- bk = base[k]
- fk = final[k]
- if bk.is_a? Hash
- d = diff_hash bk, fk
- if d.length > 0
- diffed[k] = d
- end
- else
- if bk.to_s != fk.to_s
- diffed[k] = fk
- end
- end
- end
- diffed
-end
-
-namespace :config do
- desc 'Print configuration loaded from legacy application.yml as new Arvados configuration structure'
- task diff: :environment do
- diffed = diff_hash $base_arvados_config, $arvados_config
- cfg = { "Clusters" => {}}
- cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
- puts cfg.to_yaml
- end
-end
diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb
index c8dcfef23..6f0233306 100644
--- a/services/api/test/unit/container_request_test.rb
+++ b/services/api/test/unit/container_request_test.rb
@@ -525,7 +525,6 @@ class ContainerRequestTest < ActiveSupport::TestCase
'ENOEXIST',
'arvados/apitestfixture:ENOEXIST',
].each do |img|
- puts "RC", Rails.configuration.RemoteClusters
test "container_image_for_container(#{img.inspect}) => 422" do
set_user_from_auth :active
assert_raises(ArvadosModel::UnresolvableContainerError) do
commit eb0bb0118051b0acbff09cf87287ad83a48ee337
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Apr 2 11:28:23 2019 -0400
13996: Refactor, create ConfigLoader class
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index 58c1c86dd..3da6dc803 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -63,6 +63,17 @@ Clusters:
ExternalURL: ""
Workbench2:
ExternalURL: ""
+ PostgreSQL:
+ # max concurrent connections per arvados server daemon
+ ConnectionPool: 32
+ Connection:
+ # All parameters here are passed to the PG client library in a connection string;
+ # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
+ Host: ""
+ Port: 0
+ User: ""
+ Password: ""
+ DBName: ""
API:
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 96f6a7f04..8bed5c655 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -43,82 +43,84 @@ end
$base_arvados_config = $arvados_config.deep_dup
-declare_config "ClusterID", NonemptyString, :uuid_prefix
-declare_config "ManagementToken", String, :ManagementToken
-declare_config "Git.Repositories", String, :git_repositories_dir
-declare_config "API.DisabledAPIs", Array, :disable_api_methods
-declare_config "API.MaxRequestSize", Integer, :max_request_size
-declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
-declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
-declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval
-declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
-declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
-declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
-declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
-declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
-declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
-declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
-declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address
-declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
-declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
-declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
-declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
-declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
-declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
-declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
-declare_config "TLS.Insecure", Boolean, :sso_insecure
-declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url
-declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
-declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
-declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
-declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
-declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
-declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
-declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
-declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
-declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
-declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
-declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
-declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
-declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
-declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
-declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
-declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
-declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
-declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances
-declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
-declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
-declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
-declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
-declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
-declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
-declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
-declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
-declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
-declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
-declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
-declare_config "Containers.SLURM.Managed.DNSServerConfDir", Pathname, :dns_server_conf_dir
-declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname, :dns_server_conf_template
-declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
-declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
-declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
-declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
-declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
-declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
-declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
-declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
-declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
-declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
-declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
-declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
-declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
-declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
-declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address
-declare_config "Services.Websocket.ExternalURL", URI, :websocket_address
-declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url
-declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base
-declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base, ->(cfg, k, v) { set_cfg cfg, "Services.GitSSH.ExternalURL", "ssh://#{v}" }
-declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
+arvcfg = ConfigLoader.new
+
+arvcfg.declare_config "ClusterID", NonemptyString, :uuid_prefix
+arvcfg.declare_config "ManagementToken", String, :ManagementToken
+arvcfg.declare_config "Git.Repositories", String, :git_repositories_dir
+arvcfg.declare_config "API.DisabledAPIs", Array, :disable_api_methods
+arvcfg.declare_config "API.MaxRequestSize", Integer, :max_request_size
+arvcfg.declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
+arvcfg.declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
+arvcfg.declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval
+arvcfg.declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
+arvcfg.declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
+arvcfg.declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
+arvcfg.declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
+arvcfg.declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
+arvcfg.declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
+arvcfg.declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
+arvcfg.declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address
+arvcfg.declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
+arvcfg.declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
+arvcfg.declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
+arvcfg.declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
+arvcfg.declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
+arvcfg.declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
+arvcfg.declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
+arvcfg.declare_config "TLS.Insecure", Boolean, :sso_insecure
+arvcfg.declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url
+arvcfg.declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
+arvcfg.declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
+arvcfg.declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+arvcfg.declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
+arvcfg.declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
+arvcfg.declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
+arvcfg.declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
+arvcfg.declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
+arvcfg.declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
+arvcfg.declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
+arvcfg.declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
+arvcfg.declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
+arvcfg.declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+arvcfg.declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
+arvcfg.declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
+arvcfg.declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
+arvcfg.declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
+arvcfg.declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances
+arvcfg.declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
+arvcfg.declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
+arvcfg.declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
+arvcfg.declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
+arvcfg.declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
+arvcfg.declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
+arvcfg.declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
+arvcfg.declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
+arvcfg.declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
+arvcfg.declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
+arvcfg.declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfDir", Pathname, :dns_server_conf_dir
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname, :dns_server_conf_template
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
+arvcfg.declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
+arvcfg.declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+arvcfg.declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
+arvcfg.declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
+arvcfg.declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
+arvcfg.declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
+arvcfg.declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
+arvcfg.declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
+arvcfg.declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
+arvcfg.declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
+arvcfg.declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
+arvcfg.declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
+arvcfg.declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address
+arvcfg.declare_config "Services.Websocket.ExternalURL", URI, :websocket_address
+arvcfg.declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url
+arvcfg.declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base
+arvcfg.declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, "Services.GitSSH.ExternalURL", "ssh://#{v}" }
+arvcfg.declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
h = {}
v.each do |clusterid, host|
h[clusterid] = {
@@ -129,9 +131,20 @@ declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
"ActivateUsers" => false
}
end
- set_cfg cfg, "RemoteClusters", h
+ ConfigLoader.set_cfg cfg, "RemoteClusters", h
}
-declare_config "RemoteClusters.*.Proxy", Boolean, :remote_hosts_via_dns
+arvcfg.declare_config "RemoteClusters.*.Proxy", Boolean, :remote_hosts_via_dns
+
+dbcfg = ConfigLoader.new
+
+dbcfg.declare_config "PostgreSQL.ConnectionPool", Integer, :pool
+dbcfg.declare_config "PostgreSQL.Connection.Host", String, :host
+dbcfg.declare_config "PostgreSQL.Connection.Port", Integer, :port
+dbcfg.declare_config "PostgreSQL.Connection.User", String, :username
+dbcfg.declare_config "PostgreSQL.Connection.Password", String, :password
+dbcfg.declare_config "PostgreSQL.Connection.DBName", String, :database
+dbcfg.declare_config "PostgreSQL.Connection.Template", String, :template
+dbcfg.declare_config "PostgreSQL.Connection.Encoding", String, :encoding
application_config = {}
%w(application.default application).each do |cfgfile|
@@ -146,7 +159,16 @@ application_config = {}
end
end
-$remaining_config = migrate_config application_config, $arvados_config
+db_config = {}
+path = "#{::Rails.root.to_s}/config/database.ymlx"
+if File.exist? path
+ yaml = ERB.new(IO.read path).result(binding)
+ confs = YAML.load(yaml, deserialize_symbols: true)
+ db_config.merge!(confs[::Rails.env.to_s] || {})
+end
+
+$remaining_config = arvcfg.migrate_config(application_config, $arvados_config)
+dbcfg.migrate_config(db_config, $arvados_config)
if application_config[:auto_activate_users_from]
application_config[:auto_activate_users_from].each do |cluster|
@@ -158,10 +180,25 @@ end
# Checks for wrongly typed configuration items, and essential items
# that can't be empty
-coercion_and_check $arvados_config
+arvcfg.coercion_and_check $arvados_config
+dbcfg.coercion_and_check $arvados_config
Server::Application.configure do
- copy_into_config $arvados_config, config
- copy_into_config $remaining_config, config
+ ConfigLoader.copy_into_config $arvados_config, config
+ ConfigLoader.copy_into_config $remaining_config, config
config.secret_key_base = config.secret_token
+
+ dbcfg = {}
+ dbcfg[::Rails.env.to_s] = {
+ adapter: 'postgresql',
+ template: $arvados_config["PostgreSQL"]["Connection"]["Template"],
+ encoding: $arvados_config["PostgreSQL"]["Connection"]["Encoding"],
+ database: $arvados_config["PostgreSQL"]["Connection"]["DBName"],
+ username: $arvados_config["PostgreSQL"]["Connection"]["User"],
+ password: $arvados_config["PostgreSQL"]["Connection"]["Password"],
+ host: $arvados_config["PostgreSQL"]["Connection"]["Host"],
+ port: $arvados_config["PostgreSQL"]["Connection"]["Port"],
+ pool: $arvados_config["PostgreSQL"]["ConnectionPool"]
+ }
+ Rails.application.config.database_configuration = dbcfg
end
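The second loader above (dbcfg) maps the flat database.yml keys into the nested PostgreSQL section of the cluster config. A rough sketch with made-up values, assuming the PostgreSQL defaults have already been loaded so the nested keys exist:

    # Hypothetical legacy database.yml stanza, already loaded into a hash:
    legacy_db = { host: "localhost", port: 5432, username: "arvados",
                  password: "xyzzy", database: "arvados_development", pool: 32 }

    dbcfg.migrate_config(legacy_db, $arvados_config)
    $arvados_config["PostgreSQL"]["Connection"]["Host"]   #=> "localhost"
    $arvados_config["PostgreSQL"]["ConnectionPool"]       #=> 32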
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
index 8b31a62c4..2d1ddd8b8 100644
--- a/services/api/lib/config_loader.rb
+++ b/services/api/lib/config_loader.rb
@@ -40,31 +40,6 @@ module Psych
end
end
-def set_cfg cfg, k, v
- # "foo.bar = baz" --> { cfg["foo"]["bar"] = baz }
- ks = k.split '.'
- k = ks.pop
- ks.each do |kk|
- cfg = cfg[kk]
- if cfg.nil?
- break
- end
- end
- if !cfg.nil?
- cfg[k] = v
- end
-end
-
-$config_migrate_map = {}
-$config_types = {}
-def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
- if migrate_from
- $config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
- set_cfg cfg, assign_to, v
- }
- end
- $config_types[assign_to] = configtype
-end
module Boolean; end
class TrueClass; include Boolean; end
@@ -73,100 +48,132 @@ class FalseClass; include Boolean; end
class NonemptyString < String
end
-def parse_duration durstr
- duration_re = /(\d+(\.\d+)?)(s|m|h)/
- dursec = 0
- while durstr != ""
- mt = duration_re.match durstr
- if !mt
- raise "#{cfgkey} not a valid duration: '#{cfg[k]}', accepted suffixes are s, m, h"
+class ConfigLoader
+ def initialize
+ @config_migrate_map = {}
+ @config_types = {}
+ end
+
+ def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
+ if migrate_from
+ @config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
+ ConfigLoader.set_cfg cfg, assign_to, v
+ }
end
- multiplier = {s: 1, m: 60, h: 3600}
- dursec += (Float(mt[1]) * multiplier[mt[3].to_sym])
- durstr = durstr[mt[0].length..-1]
+ @config_types[assign_to] = configtype
end
- return dursec.seconds
-end
-def migrate_config from_config, to_config
- remainders = {}
- from_config.each do |k, v|
- if $config_migrate_map[k.to_sym]
- $config_migrate_map[k.to_sym].call to_config, k, v
- else
- remainders[k] = v
+
+ def migrate_config from_config, to_config
+ remainders = {}
+ from_config.each do |k, v|
+ if @config_migrate_map[k.to_sym]
+ @config_migrate_map[k.to_sym].call to_config, k, v
+ else
+ remainders[k] = v
+ end
end
+ remainders
end
- remainders
-end
-def coercion_and_check check_cfg
- $config_types.each do |cfgkey, cfgtype|
- cfg = check_cfg
- k = cfgkey
- ks = k.split '.'
- k = ks.pop
- ks.each do |kk|
- cfg = cfg[kk]
+ def coercion_and_check check_cfg
+ @config_types.each do |cfgkey, cfgtype|
+ cfg = check_cfg
+ k = cfgkey
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
+ end
+
if cfg.nil?
- break
+ raise "missing #{cfgkey}"
end
- end
- if cfg.nil?
- raise "missing #{cfgkey}"
- end
+ if cfgtype == String and !cfg[k]
+ cfg[k] = ""
+ end
- if cfgtype == String and !cfg[k]
- cfg[k] = ""
- end
+ if cfgtype == String and cfg[k].is_a? Symbol
+ cfg[k] = cfg[k].to_s
+ end
- if cfgtype == String and cfg[k].is_a? Symbol
- cfg[k] = cfg[k].to_s
- end
+ if cfgtype == Pathname and cfg[k].is_a? String
- if cfgtype == Pathname and cfg[k].is_a? String
+ if cfg[k] == ""
+ cfg[k] = Pathname.new("")
+ else
+ cfg[k] = Pathname.new(cfg[k])
+ if !cfg[k].exist?
+ raise "#{cfgkey} path #{cfg[k]} does not exist"
+ end
+ end
+ end
- if cfg[k] == ""
- cfg[k] = Pathname.new("")
- else
- cfg[k] = Pathname.new(cfg[k])
- if !cfg[k].exist?
- raise "#{cfgkey} path #{cfg[k]} does not exist"
+ if cfgtype == NonemptyString
+ if (!cfg[k] || cfg[k] == "")
+ raise "#{cfgkey} cannot be empty"
+ end
+ if cfg[k].is_a? String
+ next
end
end
- end
- if cfgtype == NonemptyString
- if (!cfg[k] || cfg[k] == "")
- raise "#{cfgkey} cannot be empty"
+ if cfgtype == ActiveSupport::Duration
+ if cfg[k].is_a? Integer
+ cfg[k] = cfg[k].seconds
+ elsif cfg[k].is_a? String
+ cfg[k] = ConfigLoader.parse_duration cfg[k]
+ end
end
- if cfg[k].is_a? String
- next
+
+ if cfgtype == URI
+ cfg[k] = URI(cfg[k])
end
- end
- if cfgtype == ActiveSupport::Duration
- if cfg[k].is_a? Integer
- cfg[k] = cfg[k].seconds
- elsif cfg[k].is_a? String
- cfg[k] = parse_duration cfg[k]
+ if !cfg[k].is_a? cfgtype
+ raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
end
end
+ end
- if cfgtype == URI
- cfg[k] = URI(cfg[k])
+ def self.set_cfg cfg, k, v
+ # "foo.bar = baz" --> { cfg["foo"]["bar"] = baz }
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
end
-
- if !cfg[k].is_a? cfgtype
- raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
+ if !cfg.nil?
+ cfg[k] = v
end
end
-end
+ def self.parse_duration durstr
+ duration_re = /(\d+(\.\d+)?)(s|m|h)/
+ dursec = 0
+ while durstr != ""
+ mt = duration_re.match durstr
+ if !mt
+ raise "#{cfgkey} not a valid duration: '#{cfg[k]}', accepted suffixes are s, m, h"
+ end
+ multiplier = {s: 1, m: 60, h: 3600}
+ dursec += (Float(mt[1]) * multiplier[mt[3].to_sym])
+ durstr = durstr[mt[0].length..-1]
+ end
+ return dursec.seconds
+ end
-def copy_into_config src, dst
- src.each do |k, v|
- dst.send "#{k}=", Marshal.load(Marshal.dump v)
+ def self.copy_into_config src, dst
+ src.each do |k, v|
+ dst.send "#{k}=", Marshal.load(Marshal.dump v)
+ end
end
+
end
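The ConfigLoader refactor above replaces the global declare_config/migrate_config/set_cfg functions with instance and class methods. A minimal usage sketch, assuming it runs inside the Rails app so ActiveSupport and lib/config_loader.rb are loadable:

    require 'config_loader'

    loader = ConfigLoader.new
    # Map a legacy application.yml key onto its place in the new nested config.
    loader.declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response

    new_cfg    = { "API" => { "MaxItemsPerResponse" => 1000 } }
    legacy_cfg = { max_items_per_response: 500, some_unknown_key: "kept" }

    remainder = loader.migrate_config(legacy_cfg, new_cfg)
    # new_cfg["API"]["MaxItemsPerResponse"] is now 500; unrecognized keys come back in remainder.
    loader.coercion_and_check new_cfg          # raises if a declared key is missing or mistyped

    ConfigLoader.parse_duration("1.5h")        #=> 5400.0 seconds (an ActiveSupport::Duration)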
diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
index d92561fc1..537fe5252 100644
--- a/services/api/test/functional/arvados/v1/repositories_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
@@ -208,7 +208,7 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
{cfg: "GitHTTP", cfgval: false, refute: /^http/ },
].each do |expect|
test "set #{expect[:cfg]} to #{expect[:cfgval]}" do
- set_cfg Rails.configuration.Services, expect[:cfg].to_s, expect[:cfgval]
+ ConfigLoader.set_cfg Rails.configuration.Services, expect[:cfg].to_s, expect[:cfgval]
authorize_with :active
get :index
assert_response :success
diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb
index e87a1c6d8..5747a85cf 100644
--- a/services/api/test/test_helper.rb
+++ b/services/api/test/test_helper.rb
@@ -99,8 +99,8 @@ class ActiveSupport::TestCase
def restore_configuration
# Restore configuration settings changed during tests
- copy_into_config $arvados_config, Rails.configuration
- copy_into_config $remaining_config, Rails.configuration
+ ConfigLoader.copy_into_config $arvados_config, Rails.configuration
+ ConfigLoader.copy_into_config $remaining_config, Rails.configuration
end
def set_user_from_auth(auth_name)
commit 90944740f40ab0dbfc4bdfc1b16accfbf6559e4f
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 26 17:52:35 2019 -0400
13996: Adjust config:dump to dump active config
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake
index 844e8b7c8..1790a7950 100644
--- a/services/api/lib/tasks/config_dump.rake
+++ b/services/api/lib/tasks/config_dump.rake
@@ -5,8 +5,8 @@
namespace :config do
desc 'Print active site configuration'
task dump: :environment do
- cfg = { "Clusters" => {}}
- cfg["Clusters"][$arvados_config["ClusterID"]] = $arvados_config.select {|k,v| k != "ClusterID"}
- puts cfg.to_yaml
+ combined = $arvados_config.deep_dup
+ combined.update $remaining_config
+ puts combined.to_yaml
end
end
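config:dump now prints the structured config merged with any leftover legacy keys. The deep_dup keeps the rake task from mutating the live $arvados_config; a tiny illustration with placeholder globals:

    require 'active_support/core_ext/object/deep_dup'

    $arvados_config   = { "API" => { "MaxItemsPerResponse" => 1000 } }
    $remaining_config = { "some_legacy_key" => true }

    combined = $arvados_config.deep_dup
    combined.update $remaining_config              # Hash#update is merge!
    combined["API"]["MaxItemsPerResponse"] = 2     # edits the copy only
    $arvados_config["API"]["MaxItemsPerResponse"]  #=> still 1000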
commit ff043c7f69a599f35c47a283cc7f78addffe5fdc
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 26 17:34:30 2019 -0400
13996: Update comments
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 51b2bec0d..96f6a7f04 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -45,7 +45,7 @@ $base_arvados_config = $arvados_config.deep_dup
declare_config "ClusterID", NonemptyString, :uuid_prefix
declare_config "ManagementToken", String, :ManagementToken
-declare_config "Git.Repositories", Pathname, :git_repositories_dir
+declare_config "Git.Repositories", String, :git_repositories_dir
declare_config "API.DisabledAPIs", Array, :disable_api_methods
declare_config "API.MaxRequestSize", Integer, :max_request_size
declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
diff --git a/services/api/lib/tasks/config_diff.rake b/services/api/lib/tasks/config_diff.rake
index a9249df3a..0fd6fdd46 100644
--- a/services/api/lib/tasks/config_diff.rake
+++ b/services/api/lib/tasks/config_diff.rake
@@ -22,7 +22,7 @@ def diff_hash base, final
end
namespace :config do
- desc 'Diff site configuration'
+ desc 'Print configuration loaded from legacy application.yml as new Arvados configuration structure'
task diff: :environment do
diffed = diff_hash $base_arvados_config, $arvados_config
cfg = { "Clusters" => {}}
diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake
index bc6deb4bb..844e8b7c8 100644
--- a/services/api/lib/tasks/config_dump.rake
+++ b/services/api/lib/tasks/config_dump.rake
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: AGPL-3.0
namespace :config do
- desc 'Show site configuration'
+ desc 'Print active site configuration'
task dump: :environment do
cfg = { "Clusters" => {}}
cfg["Clusters"][$arvados_config["ClusterID"]] = $arvados_config.select {|k,v| k != "ClusterID"}
commit 7112c01c75ad8be748b33b86a033e668e607eb6c
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 26 17:10:00 2019 -0400
13996: Add rake config:diff
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 79cf5bd0f..51b2bec0d 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -41,6 +41,8 @@ $arvados_config = {}
end
end
+$base_arvados_config = $arvados_config.deep_dup
+
declare_config "ClusterID", NonemptyString, :uuid_prefix
declare_config "ManagementToken", String, :ManagementToken
declare_config "Git.Repositories", Pathname, :git_repositories_dir
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
index 3e3c82a36..8b31a62c4 100644
--- a/services/api/lib/config_loader.rb
+++ b/services/api/lib/config_loader.rb
@@ -24,6 +24,18 @@ module Psych
end
@emitter.scalar outstr, nil, nil, true, false, Nodes::Scalar::ANY
end
+
+ def visit_URI_Generic o
+ @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+ end
+
+ def visit_URI_HTTP o
+ @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+ end
+
+ def visit_Pathname o
+ @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY
+ end
end
end
end
diff --git a/services/api/lib/tasks/config_diff.rake b/services/api/lib/tasks/config_diff.rake
new file mode 100644
index 000000000..a9249df3a
--- /dev/null
+++ b/services/api/lib/tasks/config_diff.rake
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+def diff_hash base, final
+ diffed = {}
+ base.each do |k,v|
+ bk = base[k]
+ fk = final[k]
+ if bk.is_a? Hash
+ d = diff_hash bk, fk
+ if d.length > 0
+ diffed[k] = d
+ end
+ else
+ if bk.to_s != fk.to_s
+ diffed[k] = fk
+ end
+ end
+ end
+ diffed
+end
+
+namespace :config do
+ desc 'Diff site configuration'
+ task diff: :environment do
+ diffed = diff_hash $base_arvados_config, $arvados_config
+ cfg = { "Clusters" => {}}
+ cfg["Clusters"][$arvados_config["ClusterID"]] = diffed.select {|k,v| k != "ClusterID"}
+ puts cfg.to_yaml
+ end
+end
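diff_hash walks the two nested hashes and keeps only the keys whose values differ, so config:diff prints just the settings that the legacy files changed relative to the defaults. A tiny worked example using the function above:

    base  = { "API" => { "MaxItemsPerResponse" => 1000 }, "ClusterID" => "zzzzz" }
    final = { "API" => { "MaxItemsPerResponse" => 500  }, "ClusterID" => "zzzzz" }

    diff_hash(base, final)   #=> { "API" => { "MaxItemsPerResponse" => 500 } }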
commit f98a4eb7fcd4f762c8dc707ec1661ca5a5802f6e
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 26 16:31:06 2019 -0400
13996: Fix discovery document remoteHosts
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index 222b106a9..d67568d43 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -25,6 +25,8 @@ class Arvados::V1::SchemaController < ApplicationController
def discovery_doc
Rails.cache.fetch 'arvados_v1_rest_discovery' do
Rails.application.eager_load!
+ remoteHosts = {}
+ Rails.configuration.RemoteClusters.each {|k,v| if k != "*" then remoteHosts[k] = v["Host"] end }
discovery = {
kind: "discovery#restDescription",
discoveryVersion: "v1",
@@ -61,7 +63,7 @@ class Arvados::V1::SchemaController < ApplicationController
crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers["Logging"]["LogPartialLineThrottlePeriod"],
crunchLogUpdatePeriod: Rails.configuration.Containers["Logging"]["LogUpdatePeriod"],
crunchLogUpdateSize: Rails.configuration.Containers["Logging"]["LogUpdateSize"],
- remoteHosts: Rails.configuration.RemoteClusters.map {|k,v| v["Host"]},
+ remoteHosts: remoteHosts,
remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"]["Proxy"],
websocketUrl: Rails.configuration.Services["Websocket"]["ExternalURL"].to_s,
workbenchUrl: Rails.configuration.Services["Workbench1"]["ExternalURL"].to_s,
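Before this fix, map over RemoteClusters yielded a bare array of hosts (including one for the "*" wildcard entry), while the discovery document wants a map from cluster ID to host. A small illustration with made-up cluster data:

    remote_clusters = {
      "zbbbb" => { "Host" => "zbbbb.example.com" },
      "*"     => { "Proxy" => true },
    }

    remote_clusters.map {|k, v| v["Host"]}    #=> ["zbbbb.example.com", nil]

    remote_hosts = {}
    remote_clusters.each {|k, v| remote_hosts[k] = v["Host"] if k != "*" }
    remote_hosts                              #=> {"zbbbb"=>"zbbbb.example.com"}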
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
index 24756b93c..6923f43cb 100644
--- a/services/api/app/controllers/static_controller.rb
+++ b/services/api/app/controllers/static_controller.rb
@@ -12,7 +12,7 @@ class StaticController < ApplicationController
def home
respond_to do |f|
f.html do
- if !Rails.configuration.Services["Workbench1"]["ExternalURL"].empty?
+ if !Rails.configuration.Services["Workbench1"]["ExternalURL"].to_s.empty?
redirect_to Rails.configuration.Services["Workbench1"]["ExternalURL"]
else
render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
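ExternalURL values are coerced to URI objects by coercion_and_check, and URI instances do not respond to empty?, which is why the .to_s was added above. For example:

    require 'uri'

    url = URI("https://workbench.example")   # placeholder value
    url.to_s.empty?             #=> false
    URI.parse("").to_s.empty?   #=> true
    # url.empty?  would raise NoMethodError on a URI::HTTP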
commit 17028c6f18a3e83b39de1655b6bfeafe2938220d
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 26 14:38:59 2019 -0400
13996: Explicitly get string representation of URLs
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index 5f042877d..222b106a9 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -63,10 +63,10 @@ class Arvados::V1::SchemaController < ApplicationController
crunchLogUpdateSize: Rails.configuration.Containers["Logging"]["LogUpdateSize"],
remoteHosts: Rails.configuration.RemoteClusters.map {|k,v| v["Host"]},
remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"]["Proxy"],
- websocketUrl: Rails.configuration.Services["Websocket"]["ExternalURL"],
- workbenchUrl: Rails.configuration.Services["Workbench1"]["ExternalURL"],
- keepWebServiceUrl: Rails.configuration.Services["WebDAV"]["ExternalURL"],
- gitUrl: Rails.configuration.Services["GitHTTP"]["ExternalURL"],
+ websocketUrl: Rails.configuration.Services["Websocket"]["ExternalURL"].to_s,
+ workbenchUrl: Rails.configuration.Services["Workbench1"]["ExternalURL"].to_s,
+ keepWebServiceUrl: Rails.configuration.Services["WebDAV"]["ExternalURL"].to_s,
+ gitUrl: Rails.configuration.Services["GitHTTP"]["ExternalURL"].to_s,
parameters: {
alt: {
type: "string",
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
index 5c7b54067..80cd6de16 100644
--- a/services/api/test/functional/arvados/v1/schema_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/schema_controller_test.rb
@@ -33,8 +33,8 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])
assert_match(/^unknown$/, discovery_doc['packageVersion'])
- assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services["Websocket"]["ExternalURL"]
- assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services["Workbench1"]["ExternalURL"]
+ assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services["Websocket"]["ExternalURL"].to_s
+ assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services["Workbench1"]["ExternalURL"].to_s
assert_equal('zzzzz', discovery_doc['uuidPrefix'])
end
commit a3a0940ccf0aeacb45aa7216035a2d815ddf7bcf
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 26 14:07:53 2019 -0400
13996: Don't force check for GitInternalDir, breaks tests
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index f30dae09f..79cf5bd0f 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -106,7 +106,7 @@ declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(
declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
-declare_config "Containers.JobsAPI.GitInternalDir", Pathname, :git_internal_dir
+declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
commit 1a613db714afd5f1c5a14a807fea02e125a4a75b
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 26 13:43:39 2019 -0400
13996: Unit tests pass
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/app/controllers/arvados/v1/healthcheck_controller.rb b/services/api/app/controllers/arvados/v1/healthcheck_controller.rb
index c12bc6e90..6c3822437 100644
--- a/services/api/app/controllers/arvados/v1/healthcheck_controller.rb
+++ b/services/api/app/controllers/arvados/v1/healthcheck_controller.rb
@@ -19,7 +19,7 @@ class Arvados::V1::HealthcheckController < ApplicationController
mgmt_token = Rails.configuration.ManagementToken
auth_header = request.headers['Authorization']
- if !mgmt_token
+ if mgmt_token == ""
send_json ({"errors" => "disabled"}), status: 404
elsif !auth_header
send_json ({"errors" => "authorization required"}), status: 401
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index bf569a2d8..0a10be80e 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -199,8 +199,8 @@ class Node < ArvadosModel
ptr_domain: ptr_domain,
}
- if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].empty? and
- !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"].empty?)
+ if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].to_s.empty? and
+ !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"].to_s.empty?)
tmpfile = nil
begin
begin
@@ -236,8 +236,8 @@ class Node < ArvadosModel
end
end
- if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].empty? and
- !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"].empty?)
+ if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].to_s.empty? and
+ !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"].to_s.empty?)
restartfile = File.join(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], 'restart.txt')
begin
File.open(restartfile, 'w') do |f|
@@ -263,8 +263,8 @@ class Node < ArvadosModel
# At startup, make sure all DNS entries exist. Otherwise, slurmctld
# will refuse to start.
- if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].empty? and
- !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"].empty? and
+ if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].to_s.empty? and
+ !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"].to_s.empty? and
!Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"].empty?)
(0..Rails.configuration.Containers["MaxComputeVMs"]-1).each do |slot_number|
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 9aa6fe2c9..17ee4d999 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -493,7 +493,7 @@ class User < ArvadosModel
# create login permission for the given vm_uuid, if it does not already exist
def create_vm_login_permission_link(vm_uuid, repo_name)
# vm uuid is optional
- return if !vm_uuid
+ return if vm_uuid == ""
vm = VirtualMachine.where(uuid: vm_uuid).first
if !vm
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index bc58dbfc5..f30dae09f 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -43,7 +43,7 @@ end
declare_config "ClusterID", NonemptyString, :uuid_prefix
declare_config "ManagementToken", String, :ManagementToken
-declare_config "Git.Repositories", String, :git_repositories_dir
+declare_config "Git.Repositories", Pathname, :git_repositories_dir
declare_config "API.DisabledAPIs", Array, :disable_api_methods
declare_config "API.MaxRequestSize", Integer, :max_request_size
declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
@@ -95,8 +95,8 @@ declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport:
declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
-declare_config "Containers.SLURM.Managed.DNSServerConfDir", String, :dns_server_conf_dir
-declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", String, :dns_server_conf_template
+declare_config "Containers.SLURM.Managed.DNSServerConfDir", Pathname, :dns_server_conf_dir
+declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", Pathname, :dns_server_conf_template
declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
@@ -106,7 +106,7 @@ declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(
declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
-declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
+declare_config "Containers.JobsAPI.GitInternalDir", Pathname, :git_internal_dir
declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
index fbb7213fa..3e3c82a36 100644
--- a/services/api/lib/config_loader.rb
+++ b/services/api/lib/config_loader.rb
@@ -113,6 +113,18 @@ def coercion_and_check check_cfg
cfg[k] = cfg[k].to_s
end
+ if cfgtype == Pathname and cfg[k].is_a? String
+
+ if cfg[k] == ""
+ cfg[k] = Pathname.new("")
+ else
+ cfg[k] = Pathname.new(cfg[k])
+ if !cfg[k].exist?
+ raise "#{cfgkey} path #{cfg[k]} does not exist"
+ end
+ end
+ end
+
if cfgtype == NonemptyString
if (!cfg[k] || cfg[k] == "")
raise "#{cfgkey} cannot be empty"
diff --git a/services/api/lib/crunch_dispatch.rb b/services/api/lib/crunch_dispatch.rb
index f3e17ffa2..0ebb510b5 100644
--- a/services/api/lib/crunch_dispatch.rb
+++ b/services/api/lib/crunch_dispatch.rb
@@ -338,13 +338,13 @@ class CrunchDispatch
cmd_args = nil
case Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"]
- when :none
+ when "none"
if @running.size > 0
# Don't run more than one at a time.
return
end
cmd_args = []
- when :slurm_immediate
+ when "slurm_immediate"
nodelist = nodes_available_for_job(job)
if nodelist.nil?
if Time.now < @node_wait_deadline
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
index 9d4912805..5c7b54067 100644
--- a/services/api/test/functional/arvados/v1/schema_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/schema_controller_test.rb
@@ -29,12 +29,12 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
assert_response :success
discovery_doc = JSON.parse(@response.body)
assert_includes discovery_doc, 'defaultTrashLifetime'
- assert_equal discovery_doc['defaultTrashLifetime'], Rails.application.config.default_trash_lifetime
+ assert_equal discovery_doc['defaultTrashLifetime'], Rails.configuration.Collections["DefaultTrashLifetime"]
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])
assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])
assert_match(/^unknown$/, discovery_doc['packageVersion'])
- assert_equal discovery_doc['websocketUrl'], Rails.application.config.websocket_address
- assert_equal discovery_doc['workbenchUrl'], Rails.application.config.workbench_address
+ assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services["Websocket"]["ExternalURL"]
+ assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services["Workbench1"]["ExternalURL"]
assert_equal('zzzzz', discovery_doc['uuidPrefix'])
end
diff --git a/services/api/test/unit/node_test.rb b/services/api/test/unit/node_test.rb
index 7e6a01db6..97eddc215 100644
--- a/services/api/test/unit/node_test.rb
+++ b/services/api/test/unit/node_test.rb
@@ -75,8 +75,8 @@ class NodeTest < ActiveSupport::TestCase
end
test "dns update with no commands/dirs configured" do
- Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] = false
- Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] = false
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] = ""
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] = ""
Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"] = 'ignored!'
Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"] = 'ignored!'
assert Node.dns_server_update 'compute65535', '127.0.0.127'
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
index 52333db8e..5ee9bb039 100644
--- a/services/api/test/unit/user_test.rb
+++ b/services/api/test/unit/user_test.rb
@@ -157,8 +157,8 @@ class UserTest < ActiveSupport::TestCase
[false, 'bar at example.com', nil, true],
[true, 'foo at example.com', true, nil],
[true, 'bar at example.com', true, true],
- [false, false, nil, nil],
- [true, false, true, nil]
+ [false, '', nil, nil],
+ [true, '', true, nil]
].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin|
# In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'.
test "auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}" do
@@ -657,7 +657,7 @@ class UserTest < ActiveSupport::TestCase
named_repo.uuid, user.uuid, "permission", "can_manage")
end
# Check for VM login.
- if auto_vm_uuid = Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"]
+ if (auto_vm_uuid = Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"]) != ""
verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
"permission", "can_login", "username", expect_username)
end
commit f9ae5b90a5e04477133ca7a7d34bd3eebf862474
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Mon Mar 25 16:39:23 2019 -0400
13996: Fixing test WIP
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index 1ec921b8a..5c97735f2 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -81,10 +81,12 @@ class ApplicationController < ActionController::Base
def default_url_options
options = {}
- exturl = URI.parse(Rails.configuration.Services["Controller"]["ExternalURL"])
- options[:host] = exturl.host
- options[:port] = exturl.port
- options[:protocol] = exturl.scheme
+ if Rails.configuration.Services["Controller"]["ExternalURL"] != ""
+ exturl = URI.parse(Rails.configuration.Services["Controller"]["ExternalURL"])
+ options[:host] = exturl.host
+ options[:port] = exturl.port
+ options[:protocol] = exturl.scheme
+ end
options
end
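The guard above matters because URI.parse accepts an empty string and returns a URI whose host, port and scheme are all nil, which would otherwise leak nils into url_for. A minimal standalone sketch of the same guard (assumed shapes and a made-up URL, not the actual Rails initializer):

    require 'uri'

    # Build url_for options only when an external Controller URL is configured.
    # The URL below is a made-up example value.
    def default_url_options(external_url)
      options = {}
      if external_url != ""
        exturl = URI.parse(external_url)
        options[:host] = exturl.host
        options[:port] = exturl.port
        options[:protocol] = exturl.scheme
      end
      options
    end

    p default_url_options("")                           # => {}
    p default_url_options("https://ctrl.example:8000")  # => {:host=>"ctrl.example", :port=>8000, :protocol=>"https"}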
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index ced2f1620..5f042877d 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -46,27 +46,27 @@ class Arvados::V1::SchemaController < ApplicationController
rootUrl: root_url,
servicePath: "arvados/v1/",
batchPath: "batch",
- uuidPrefix: Rails.application.config.ClusterID,
- defaultTrashLifetime: Rails.application.config.Collections["DefaultTrashLifetime"],
- blobSignatureTtl: Rails.application.config.Collections["BlobSigningTTL"],
- maxRequestSize: Rails.application.config.API["MaxRequestSize"],
- maxItemsPerResponse: Rails.application.API["MaxItemsPerResponse"],
- dockerImageFormats: Rails.application.config.Containers["SupportedDockerImageFormats"],
- crunchLogBytesPerEvent: Rails.application.config.Containers["Logging"]["LogBytesPerEvent"],
- crunchLogSecondsBetweenEvents: Rails.application.config.Containers["Logging"]["LogSecondsBetweenEvents"],
- crunchLogThrottlePeriod: Rails.application.config.Containers["Logging"]["LogThrottlePeriod"],
- crunchLogThrottleBytes: Rails.application.config.Containers["Logging"]["LogThrottleBytes"],
- crunchLogThrottleLines: Rails.application.config.Containers["Logging"]["LogThrottleLines"],
- crunchLimitLogBytesPerJob: Rails.application.config.Containers["Logging"]["LimitLogBytesPerJob"],
- crunchLogPartialLineThrottlePeriod: Rails.application.config.Containers["Logging"]["LogPartialLineThrottlePeriod"],
- crunchLogUpdatePeriod: Rails.application.config.Containers["Logging"]["LogUpdatePeriod"],
- crunchLogUpdateSize: Rails.application.config.Containers["Logging"]["LogUpdateSize"],
- remoteHosts: Rails.application.config.RemoteClusters.map {|k,v| v.Host},
- remoteHostsViaDNS: Rails.application.config.RemoteClusters["*"]["Proxy"],
- websocketUrl: Rails.application.config.Services["Websocket"]["ExternalURL"],
- workbenchUrl: Rails.application.config.Services["Workbench1"]["ExternalURL"],
- keepWebServiceUrl: Rails.application.config.Services["WebDAV"]["ExternalURL"],
- gitUrl: Rails.application.config.Services["GitHTTP"]["ExternalURL"],
+ uuidPrefix: Rails.configuration.ClusterID,
+ defaultTrashLifetime: Rails.configuration.Collections["DefaultTrashLifetime"],
+ blobSignatureTtl: Rails.configuration.Collections["BlobSigningTTL"],
+ maxRequestSize: Rails.configuration.API["MaxRequestSize"],
+ maxItemsPerResponse: Rails.configuration.API["MaxItemsPerResponse"],
+ dockerImageFormats: Rails.configuration.Containers["SupportedDockerImageFormats"],
+ crunchLogBytesPerEvent: Rails.configuration.Containers["Logging"]["LogBytesPerEvent"],
+ crunchLogSecondsBetweenEvents: Rails.configuration.Containers["Logging"]["LogSecondsBetweenEvents"],
+ crunchLogThrottlePeriod: Rails.configuration.Containers["Logging"]["LogThrottlePeriod"],
+ crunchLogThrottleBytes: Rails.configuration.Containers["Logging"]["LogThrottleBytes"],
+ crunchLogThrottleLines: Rails.configuration.Containers["Logging"]["LogThrottleLines"],
+ crunchLimitLogBytesPerJob: Rails.configuration.Containers["Logging"]["LimitLogBytesPerJob"],
+ crunchLogPartialLineThrottlePeriod: Rails.configuration.Containers["Logging"]["LogPartialLineThrottlePeriod"],
+ crunchLogUpdatePeriod: Rails.configuration.Containers["Logging"]["LogUpdatePeriod"],
+ crunchLogUpdateSize: Rails.configuration.Containers["Logging"]["LogUpdateSize"],
+ remoteHosts: Rails.configuration.RemoteClusters.map {|k,v| v["Host"]},
+ remoteHostsViaDNS: Rails.configuration.RemoteClusters["*"]["Proxy"],
+ websocketUrl: Rails.configuration.Services["Websocket"]["ExternalURL"],
+ workbenchUrl: Rails.configuration.Services["Workbench1"]["ExternalURL"],
+ keepWebServiceUrl: Rails.configuration.Services["WebDAV"]["ExternalURL"],
+ gitUrl: Rails.configuration.Services["GitHTTP"]["ExternalURL"],
parameters: {
alt: {
type: "string",
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index 46da37afd..7f7a32007 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -518,7 +518,9 @@ class Collection < ArvadosModel
if loc = Keep::Locator.parse(search_term)
loc.strip_hints!
coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1)
- if coll_match.any? or Rails.configuration.RemoteClusters.length > 1
+ rc = Rails.configuration.RemoteClusters.select{ |k|
+ k != "*" && k != Rails.configuration.ClusterID}
+ if coll_match.any? or rc.length == 0
return get_compatible_images(readers, pattern, coll_match)
else
# Allow bare pdh that doesn't exist in the local database so
diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb
index deaa8ff71..b24df8cbd 100644
--- a/services/api/app/models/job.rb
+++ b/services/api/app/models/job.rb
@@ -491,7 +491,7 @@ class Job < ArvadosModel
end
def find_docker_image_locator
- if runtime_constraints.is_a? Hash
+ if runtime_constraints.is_a? Hash and Rails.configuration.Containers["JobsAPI"]["DefaultDockerImage"] != ""
runtime_constraints['docker_image'] ||=
Rails.configuration.Containers["JobsAPI"]["DefaultDockerImage"]
end
diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb
index ac89ecf6b..7cb36dcb9 100644
--- a/services/api/app/models/repository.rb
+++ b/services/api/app/models/repository.rb
@@ -49,7 +49,7 @@ class Repository < ArvadosModel
# prefers bare repositories over checkouts.
[["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args|
[:uuid, :name].each do |path_attr|
- git_dir = File.join(Rails.configuration.Containers["Git"]["Repositories"],
+ git_dir = File.join(Rails.configuration.Git["Repositories"],
repo_base % send(path_attr), *join_args)
return git_dir if File.exist?(git_dir)
end
@@ -98,21 +98,27 @@ class Repository < ArvadosModel
end
def ssh_clone_url
- _clone_url Rails.configuration.Services["GitSSH"]["ExternalURL"], 'git at git.%s.arvadosapi.com:'
+ _clone_url Rails.configuration.Services["GitSSH"].andand["ExternalURL"], 'ssh://git@git.%s.arvadosapi.com'
end
def https_clone_url
- _clone_url Rails.configuration.Services["GitHTTP"]["ExternalURL"], 'https://git.%s.arvadosapi.com/'
+ _clone_url Rails.configuration.Services["GitHTTP"].andand["ExternalURL"], 'https://git.%s.arvadosapi.com/'
end
def _clone_url config_var, default_base_fmt
- configured_base = config_var
+ if not config_var
+ return ""
+ end
prefix = new_record? ? Rails.configuration.ClusterID : uuid[0,5]
- if prefix == Rails.configuration.ClusterID and configured_base != true
- base = configured_base
+ if prefix == Rails.configuration.ClusterID and config_var != URI("")
+ base = config_var
+ else
+ base = URI(default_base_fmt % prefix)
+ end
+ if base.scheme == "ssh"
+ '%s@%s:%s.git' % [base.user, base.host, name]
else
- base = default_base_fmt % prefix
+ '%s%s.git' % [base, name]
end
- '%s%s.git' % [base, name]
end
end
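The reworked _clone_url treats the configured endpoint as a URI and emits an scp-style address for ssh:// bases and a plain URL otherwise. A standalone sketch of that formatting step (simplified: it assumes the endpoint is already a URI and leaves out the per-cluster default fallback; clone_url is a made-up helper name):

    require 'uri'

    def clone_url(base, repo_name)
      return "" if base.nil? || base == URI("")
      if base.scheme == "ssh"
        # ssh://git@git.xxxxx.example -> git@git.xxxxx.example:repo.git
        "%s@%s:%s.git" % [base.user, base.host, repo_name]
      else
        "%s%s.git" % [base, repo_name]
      end
    end

    p clone_url(URI("ssh://git@git.zzzzz.arvadosapi.com"), "active/foo")
    # => "git@git.zzzzz.arvadosapi.com:active/foo.git"
    p clone_url(URI("https://git.zzzzz.arvadosapi.com/"), "active/foo")
    # => "https://git.zzzzz.arvadosapi.com/active/foo.git"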
diff --git a/services/api/app/views/user_notifier/account_is_setup.text.erb b/services/api/app/views/user_notifier/account_is_setup.text.erb
index 417698c38..357fd91ce 100644
--- a/services/api/app/views/user_notifier/account_is_setup.text.erb
+++ b/services/api/app/views/user_notifier/account_is_setup.text.erb
@@ -10,7 +10,7 @@ Hi there,
Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.Services["Workbench1"]["ExternalURL"] %>at
- <%= Rails.configuration.Services["Workbench1"]["ExternalURL"] %><%= "/" if !Rails.configuration.Services["Workbench1"]["ExternalURL"].end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
+ <%= Rails.configuration.Services["Workbench1"]["ExternalURL"] %><%= "/" if !Rails.configuration.Services["Workbench1"]["ExternalURL"].to_s.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
for connection instructions.
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 23a5eb5a9..bc58dbfc5 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -42,7 +42,7 @@ $arvados_config = {}
end
declare_config "ClusterID", NonemptyString, :uuid_prefix
-declare_config "ManagementToken", NonemptyString, :ManagementToken
+declare_config "ManagementToken", String, :ManagementToken
declare_config "Git.Repositories", String, :git_repositories_dir
declare_config "API.DisabledAPIs", Array, :disable_api_methods
declare_config "API.MaxRequestSize", Integer, :max_request_size
@@ -115,7 +115,7 @@ declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address
declare_config "Services.Websocket.ExternalURL", URI, :websocket_address
declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url
declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base
-declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base
+declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base, ->(cfg, k, v) { set_cfg cfg, "Services.GitSSH.ExternalURL", "ssh://#{v}" }
declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
h = {}
v.each do |clusterid, host|
@@ -159,7 +159,6 @@ end
coercion_and_check $arvados_config
Server::Application.configure do
- nils = []
copy_into_config $arvados_config, config
copy_into_config $remaining_config, config
config.secret_key_base = config.secret_token
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
index 1a4135e84..fbb7213fa 100644
--- a/services/api/lib/config_loader.rb
+++ b/services/api/lib/config_loader.rb
@@ -109,6 +109,10 @@ def coercion_and_check check_cfg
cfg[k] = ""
end
+ if cfgtype == String and cfg[k].is_a? Symbol
+ cfg[k] = cfg[k].to_s
+ end
+
if cfgtype == NonemptyString
if (!cfg[k] || cfg[k] == "")
raise "#{cfgkey} cannot be empty"
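This coercion is what lets legacy symbol values (for example the old :slurm_immediate crunch_job_wrapper) satisfy String-typed keys, while NonemptyString keys still reject blanks. A minimal sketch of the check for a single key (assumed shapes and a made-up helper name; the real coercion_and_check walks the whole config tree):

    class NonemptyString < String; end

    def coerce_one(value, cfgtype, cfgkey)
      value = "" if cfgtype == String && value.nil?
      value = value.to_s if cfgtype == String && value.is_a?(Symbol)
      if cfgtype == NonemptyString && (!value || value == "")
        raise "#{cfgkey} cannot be empty"
      end
      value
    end

    p coerce_one(:slurm_immediate, String, "Containers.JobsAPI.CrunchJobWrapper")  # => "slurm_immediate"
    p coerce_one(nil, String, "ManagementToken")                                   # => ""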
diff --git a/services/api/lib/crunch_dispatch.rb b/services/api/lib/crunch_dispatch.rb
index eceada5a7..f3e17ffa2 100644
--- a/services/api/lib/crunch_dispatch.rb
+++ b/services/api/lib/crunch_dispatch.rb
@@ -110,7 +110,7 @@ class CrunchDispatch
end
def update_node_status
- return unless Server::Application.config.crunch_job_wrapper.to_s.match(/^slurm/)
+ return unless Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"].to_s.match(/^slurm/)
slurm_status.each_pair do |hostname, slurmdata|
next if @node_state[hostname] == slurmdata
begin
@@ -337,7 +337,7 @@ class CrunchDispatch
next if @running[job.uuid]
cmd_args = nil
- case Server::Application.config.crunch_job_wrapper
+ case Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"]
when :none
if @running.size > 0
# Don't run more than one at a time.
@@ -361,7 +361,7 @@ class CrunchDispatch
"--job-name=#{job.uuid}",
"--nodelist=#{nodelist.join(',')}"]
else
- raise "Unknown crunch_job_wrapper: #{Server::Application.config.crunch_job_wrapper}"
+ raise "Unknown crunch_job_wrapper: #{Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"]}"
end
cmd_args = sudo_preface + cmd_args
@@ -902,9 +902,9 @@ class CrunchDispatch
end
def sudo_preface
- return [] if not Server::Application.config.crunch_job_user
+ return [] if not Rails.configuration.Containers["JobsAPI"]["CrunchJobUser"]
["sudo", "-E", "-u",
- Server::Application.config.crunch_job_user,
+ Rails.configuration.Containers["JobsAPI"]["CrunchJobUser"],
"LD_LIBRARY_PATH=#{ENV['LD_LIBRARY_PATH']}",
"PATH=#{ENV['PATH']}",
"PERLLIB=#{ENV['PERLLIB']}",
@@ -957,7 +957,7 @@ class CrunchDispatch
# An array of job_uuids in squeue
def squeue_jobs
- if Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"].to_sym == :slurm_immediate
+ if Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"] == "slurm_immediate"
p = IO.popen(['squeue', '-a', '-h', '-o', '%j'])
begin
p.readlines.map {|line| line.strip}
diff --git a/services/api/lib/current_api_client.rb b/services/api/lib/current_api_client.rb
index 49638677b..c7b48c0cd 100644
--- a/services/api/lib/current_api_client.rb
+++ b/services/api/lib/current_api_client.rb
@@ -42,25 +42,25 @@ module CurrentApiClient
end
def system_user_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
User.uuid_prefix,
'000000000000000'].join('-')
end
def system_group_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
Group.uuid_prefix,
'000000000000000'].join('-')
end
def anonymous_group_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
Group.uuid_prefix,
'anonymouspublic'].join('-')
end
def anonymous_user_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
User.uuid_prefix,
'anonymouspublic'].join('-')
end
@@ -105,7 +105,7 @@ module CurrentApiClient
end
def all_users_group_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
Group.uuid_prefix,
'fffffffffffffff'].join('-')
end
diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb
index 4da6188d1..9d8709206 100644
--- a/services/api/lib/enable_jobs_api.rb
+++ b/services/api/lib/enable_jobs_api.rb
@@ -30,7 +30,8 @@ Disable_jobs_api_method_list = ["jobs.create",
"jobs.show",
"job_tasks.show"]
- if Rails.configuration.Containers["JobsAPI"]["Enable"] == false ||
+def check_enable_legacy_jobs_api
+ if Rails.configuration.Containers["JobsAPI"]["Enable"] == "false" ||
(Rails.configuration.Containers["JobsAPI"]["Enable"] == "auto" &&
Job.count == 0)
Rails.configuration.API["DisabledAPIs"] += Disable_jobs_api_method_list
diff --git a/services/api/lib/has_uuid.rb b/services/api/lib/has_uuid.rb
index 60db53d5e..207456694 100644
--- a/services/api/lib/has_uuid.rb
+++ b/services/api/lib/has_uuid.rb
@@ -30,7 +30,7 @@ module HasUuid
Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]
end
def generate_uuid
- [Server::Application.config.uuid_prefix,
+ [Rails.configuration.ClusterID,
self.uuid_prefix,
rand(2**256).to_s(36)[-15..-1]].
join '-'
diff --git a/services/api/test/functional/arvados/v1/collections_controller_test.rb b/services/api/test/functional/arvados/v1/collections_controller_test.rb
index ef3198bc6..7d67668c6 100644
--- a/services/api/test/functional/arvados/v1/collections_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/collections_controller_test.rb
@@ -1176,7 +1176,7 @@ EOS
assert_response 200
c = Collection.find_by_uuid(uuid)
assert_operator c.trash_at, :<, db_current_time
- assert_equal c.delete_at, c.trash_at + Rails.configuration.Collection["BlobSigningTTL"]
+ assert_equal c.delete_at, c.trash_at + Rails.configuration.Collections["BlobSigningTTL"]
end
test 'delete long-trashed collection immediately using http DELETE verb' do
diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
index d986ff937..d92561fc1 100644
--- a/services/api/test/functional/arvados/v1/repositories_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
@@ -200,15 +200,15 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
end
[
- {cfg: "Services.GitSSH.ExternalURL", cfgval: "git at example.com:", match: %r"^git at example.com:"},
- {cfg: "Services.GitSSH.ExternalURL", cfgval: true, match: %r"^git at git.zzzzz.arvadosapi.com:"},
- {cfg: "Services.GitSSH.ExternalURL", cfgval: false, refute: /^git@/ },
- {cfg: "Services.GitHTTP.ExternalURL", cfgval: "https://example.com/", match: %r"^https://example.com/"},
- {cfg: "Services.GitHTTP.ExternalURL", cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"},
- {cfg: "Services.GitHTTP.ExternalURL", cfgval: false, refute: /^http/ },
+ {cfg: "GitSSH.ExternalURL", cfgval: URI("ssh://git@example.com"), match: %r"^git at example.com:"},
+ {cfg: "GitSSH.ExternalURL", cfgval: URI(""), match: %r"^git at git.zzzzz.arvadosapi.com:"},
+ {cfg: "GitSSH", cfgval: false, refute: /^git@/ },
+ {cfg: "GitHTTP.ExternalURL", cfgval: URI("https://example.com/"), match: %r"^https://example.com/"},
+ {cfg: "GitHTTP.ExternalURL", cfgval: URI(""), match: %r"^https://git.zzzzz.arvadosapi.com/"},
+ {cfg: "GitHTTP", cfgval: false, refute: /^http/ },
].each do |expect|
test "set #{expect[:cfg]} to #{expect[:cfgval]}" do
- set_cfg Rails.configuration, expect[:cfg].to_s, expect[:cfgval]
+ set_cfg Rails.configuration.Services, expect[:cfg].to_s, expect[:cfgval]
authorize_with :active
get :index
assert_response :success
diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb
index 5d998a244..c8dcfef23 100644
--- a/services/api/test/unit/container_request_test.rb
+++ b/services/api/test/unit/container_request_test.rb
@@ -525,6 +525,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
'ENOEXIST',
'arvados/apitestfixture:ENOEXIST',
].each do |img|
+ puts "RC", Rails.configuration.RemoteClusters
test "container_image_for_container(#{img.inspect}) => 422" do
set_user_from_auth :active
assert_raises(ArvadosModel::UnresolvableContainerError) do
@@ -884,7 +885,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
[false, ActiveRecord::RecordInvalid],
[true, nil],
].each do |preemptible_conf, expected|
- test "having Rails.configuration.Containers["UsePreemptibleInstances"]=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
+ test "having Rails.configuration.Containers['UsePreemptibleInstances']=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
sp = {"preemptible" => true}
common_attrs = {cwd: "test",
priority: 1,
@@ -946,7 +947,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
[false, 'zzzzz-dz642-runningcontainr', nil],
[false, nil, nil],
].each do |preemptible_conf, requesting_c, schedule_preemptible|
- test "having Rails.configuration.Containers["UsePreemptibleInstances"]=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
+ test "having Rails.configuration.Containers['UsePreemptibleInstances']=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
common_attrs = {cwd: "test",
priority: 1,
command: ["echo", "hello"],
diff --git a/services/api/test/unit/job_test.rb b/services/api/test/unit/job_test.rb
index 65725f330..5a8826256 100644
--- a/services/api/test/unit/job_test.rb
+++ b/services/api/test/unit/job_test.rb
@@ -130,7 +130,7 @@ class JobTest < ActiveSupport::TestCase
Rails.configuration.RemoteClusters = {}
job = Job.new job_attrs(runtime_constraints:
{'docker_image' => image_spec})
- assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
+ assert(job.invalid?, "nonexistent Docker image #{spec_type} #{image_spec} was valid")
end
end
diff --git a/services/api/test/unit/user_notifier_test.rb b/services/api/test/unit/user_notifier_test.rb
index b5688fcc6..79d3e15ef 100644
--- a/services/api/test/unit/user_notifier_test.rb
+++ b/services/api/test/unit/user_notifier_test.rb
@@ -19,7 +19,7 @@ class UserNotifierTest < ActionMailer::TestCase
assert_equal 'Welcome to Arvados - shell account enabled', email.subject
assert (email.body.to_s.include? 'Your Arvados shell account has been set up'),
'Expected Your Arvados shell account has been set up in email body'
- assert (email.body.to_s.include? Rails.configuration.Services["Workbench1"]["ExternalURL"]),
+ assert (email.body.to_s.include? Rails.configuration.Services["Workbench1"]["ExternalURL"].to_s),
'Expected workbench url in email body'
end
commit 8a0e9c549595e114a0eadc9d6792a17fb59d0f3e
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Mon Mar 25 11:28:33 2019 -0400
13996: Migrate tests to new config
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
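The test changes that follow all apply the same rename pattern: flat legacy settings become entries in the nested cluster config. A few of the mappings exercised below, collected as an illustrative sketch (the authoritative mapping lives in the config migration code):

    LEGACY_TO_NESTED = {
      "blob_signing_key"               => %w[Collections BlobSigningKey],
      "blob_signature_ttl"             => %w[Collections BlobSigningTTL],
      "default_trash_lifetime"         => %w[Collections DefaultTrashLifetime],
      "default_collection_replication" => %w[Collections DefaultReplication],
      "collection_versioning"          => %w[Collections CollectionVersioning],
      "preserve_version_if_idle"       => %w[Collections PreserveVersionIfIdle],
      "disable_api_methods"            => %w[API DisabledAPIs],
      "max_index_database_read"        => %w[API MaxIndexDatabaseRead],
      "git_repositories_dir"           => %w[Git Repositories],
      "git_internal_dir"               => %w[Containers JobsAPI GitInternalDir],
      "uuid_prefix"                    => %w[ClusterID],
    }
    # e.g. Rails.configuration.blob_signing_key becomes
    #      Rails.configuration.Collections["BlobSigningKey"]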
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index aaae19f4b..46da37afd 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -125,7 +125,7 @@ class Collection < ArvadosModel
# Signature provided, but verify_signature did not like it.
logger.warn "Invalid signature on locator #{tok}"
raise ArvadosModel::PermissionDeniedError
- elsif Rails.configuration.Collections["BlobSigning"]
+ elsif !Rails.configuration.Collections["BlobSigning"]
# No signature provided, but we are running in insecure mode.
logger.debug "Missing signature on locator #{tok} ignored"
elsif Blob.new(tok).empty?
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 53439cec8..23a5eb5a9 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -111,11 +111,11 @@ declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job
declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
-declare_config "Services.Workbench1.ExternalURL", String, :workbench_address
-declare_config "Services.Websocket.ExternalURL", String, :websocket_address
-declare_config "Services.WebDAV.ExternalURL", String, :keep_web_service_url
-declare_config "Services.GitHTTP.ExternalURL", String, :git_repo_https_base
-declare_config "Services.GitSSH.ExternalURL", String, :git_repo_ssh_base
+declare_config "Services.Workbench1.ExternalURL", URI, :workbench_address
+declare_config "Services.Websocket.ExternalURL", URI, :websocket_address
+declare_config "Services.WebDAV.ExternalURL", URI, :keep_web_service_url
+declare_config "Services.GitHTTP.ExternalURL", URI, :git_repo_https_base
+declare_config "Services.GitSSH.ExternalURL", URI, :git_repo_ssh_base
declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
h = {}
v.each do |clusterid, host|
@@ -144,7 +144,7 @@ application_config = {}
end
end
-remainders = migrate_config application_config, $arvados_config
+$remaining_config = migrate_config application_config, $arvados_config
if application_config[:auto_activate_users_from]
application_config[:auto_activate_users_from].each do |cluster|
@@ -154,38 +154,13 @@ if application_config[:auto_activate_users_from]
end
end
+# Checks for wrongly typed configuration items, and essential items
+# that can't be empty
coercion_and_check $arvados_config
Server::Application.configure do
nils = []
- $arvados_config.each do |k, v|
- cfg = config
- if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
- # Config must have been set already in environments/*.rb.
- #
- # After config files have been migrated, this mechanism should
- # be deprecated, then removed.
- elsif v.nil?
- # Config variables are not allowed to be nil. Make a "naughty"
- # list, and present it below.
- nils << k
- else
- cfg.send "#{k}=", v
- end
- end
- remainders.each do |k, v|
- config.send "#{k}=", v
- end
-
- if !nils.empty?
- raise <<EOS
-Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
-
-The following configuration settings must be specified in
-config/application.yml:
-* #{nils.join "\n* "}
-
-EOS
- end
+ copy_into_config $arvados_config, config
+ copy_into_config $remaining_config, config
config.secret_key_base = config.secret_token
end
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
index 6bfbdd7a8..1a4135e84 100644
--- a/services/api/lib/config_loader.rb
+++ b/services/api/lib/config_loader.rb
@@ -29,7 +29,7 @@ module Psych
end
def set_cfg cfg, k, v
- # "foo.bar: baz" --> { config.foo.bar = baz }
+ # "foo.bar = baz" --> { cfg["foo"]["bar"] = baz }
ks = k.split '.'
k = ks.pop
ks.each do |kk|
@@ -126,9 +126,19 @@ def coercion_and_check check_cfg
end
end
+ if cfgtype == URI
+ cfg[k] = URI(cfg[k])
+ end
+
if !cfg[k].is_a? cfgtype
raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
end
end
end
+
+def copy_into_config src, dst
+ src.each do |k, v|
+ dst.send "#{k}=", Marshal.load(Marshal.dump v)
+ end
+end
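set_cfg and copy_into_config are the two small helpers the loader and the tests now share: one writes a dotted key into the nested config hash, the other deep-copies the resulting tree into Rails.configuration. A standalone sketch of the assumed behavior (the body of set_cfg is only partly visible in the diff):

    def set_cfg(cfg, key, value)
      # "Services.GitSSH.ExternalURL" -> cfg["Services"]["GitSSH"]["ExternalURL"] = value
      *path, last = key.split('.')
      path.each { |kk| cfg = (cfg[kk] ||= {}) }
      cfg[last] = value
    end

    def copy_into_config(src, dst)
      # The Marshal round-trip deep-copies each entry, so later mutation of
      # the destination config cannot leak back into src (as in the diff above).
      src.each { |k, v| dst.send("#{k}=", Marshal.load(Marshal.dump(v))) }
    end

    conf = {}
    set_cfg(conf, "Services.GitSSH.ExternalURL", "ssh://git@git.zzzzz.arvadosapi.com")
    p conf
    # => {"Services"=>{"GitSSH"=>{"ExternalURL"=>"ssh://git@git.zzzzz.arvadosapi.com"}}}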
diff --git a/services/api/test/functional/arvados/v1/collections_controller_test.rb b/services/api/test/functional/arvados/v1/collections_controller_test.rb
index ee2b016cd..ef3198bc6 100644
--- a/services/api/test/functional/arvados/v1/collections_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/collections_controller_test.rb
@@ -11,7 +11,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
def permit_unsigned_manifests isok=true
# Set security model for the life of a test.
- Rails.configuration.permit_create_collection_with_unsigned_manifest = isok
+ Rails.configuration.Collections["BlobSigning"] = !isok
end
def assert_signed_manifest manifest_text, label='', token: false
@@ -24,7 +24,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
exp = tok[/\+A[[:xdigit:]]+@([[:xdigit:]]+)/, 1].to_i(16)
sig = Blob.sign_locator(
bare,
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
expire: exp,
api_token: token)[/\+A[^\+]*/, 0]
assert_includes tok, sig
@@ -88,7 +88,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
token = api_client_authorizations(:active).send(token_method)
signed = Blob.sign_locator(
'acbd18db4cc2f85cedef654fccc4a4d8+3',
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: token)
authorize_with_token token
put :update, params: {
@@ -221,7 +221,7 @@ class Arvados::V1::CollectionsControllerTest < ActionController::TestCase
def request_capped_index(params={})
authorize_with :user1_with_load
coll1 = collections(:collection_1_of_201)
- Rails.configuration.max_index_database_read =
+ Rails.configuration.API["MaxIndexDatabaseRead"] =
yield(coll1.manifest_text.size)
get :index, params: {
select: %w(uuid manifest_text),
@@ -566,7 +566,7 @@ EOS
# Build a manifest with both signed and unsigned locators.
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
}
signed_locators = locators.collect do |x|
@@ -622,7 +622,7 @@ EOS
# TODO(twp): in phase 4, all locators will need to be signed, so
# this test should break and will need to be rewritten. Issue #2755.
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
ttl: 3600 # 1 hour
}
@@ -653,7 +653,7 @@ EOS
test "create fails with invalid signature" do
authorize_with :active
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
}
@@ -683,7 +683,7 @@ EOS
test "create fails with uuid of signed manifest" do
authorize_with :active
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
}
@@ -755,7 +755,7 @@ EOS
ea10d51bcf88862dbcc36eb292017dfd+45)
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
}
@@ -903,7 +903,7 @@ EOS
[1, 5, nil].each do |ask|
test "Set replication_desired=#{ask.inspect}" do
- Rails.configuration.default_collection_replication = 2
+ Rails.configuration.Collections["DefaultReplication"] = 2
authorize_with :active
put :update, params: {
id: collections(:replication_undesired_unconfirmed).uuid,
@@ -1176,7 +1176,7 @@ EOS
assert_response 200
c = Collection.find_by_uuid(uuid)
assert_operator c.trash_at, :<, db_current_time
- assert_equal c.delete_at, c.trash_at + Rails.configuration.blob_signature_ttl
+ assert_equal c.delete_at, c.trash_at + Rails.configuration.Collection["BlobSigningTTL"]
end
test 'delete long-trashed collection immediately using http DELETE verb' do
@@ -1208,7 +1208,7 @@ EOS
assert_response 200
c = Collection.find_by_uuid(uuid)
assert_operator c.trash_at, :<, db_current_time
- assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.default_trash_lifetime
+ assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.Collections["DefaultTrashLifetime"]
end
end
@@ -1373,8 +1373,8 @@ EOS
end
test "update collection with versioning enabled" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 1 # 1 second
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 1 # 1 second
col = collections(:collection_owned_by_active)
assert_equal 2, col.version
@@ -1383,7 +1383,7 @@ EOS
token = api_client_authorizations(:active).v2token
signed = Blob.sign_locator(
'acbd18db4cc2f85cedef654fccc4a4d8+3',
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: token)
authorize_with_token token
put :update, params: {
diff --git a/services/api/test/functional/arvados/v1/filters_test.rb b/services/api/test/functional/arvados/v1/filters_test.rb
index b596baaae..d49fe7a3e 100644
--- a/services/api/test/functional/arvados/v1/filters_test.rb
+++ b/services/api/test/functional/arvados/v1/filters_test.rb
@@ -108,7 +108,7 @@ class Arvados::V1::FiltersTest < ActionController::TestCase
format: :json,
count: 'none',
limit: 1000,
- filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+ filters: [['any', '@@', Rails.configuration.ClusterID]],
}
assert_response :success
@@ -137,7 +137,7 @@ class Arvados::V1::FiltersTest < ActionController::TestCase
limit: 1000,
offset: '5',
last_object_class: 'PipelineInstance',
- filters: [['any', '@@', Rails.configuration.uuid_prefix]],
+ filters: [['any', '@@', Rails.configuration.ClusterID]],
}
assert_response :success
diff --git a/services/api/test/functional/arvados/v1/groups_controller_test.rb b/services/api/test/functional/arvados/v1/groups_controller_test.rb
index 37b606409..ff86f0493 100644
--- a/services/api/test/functional/arvados/v1/groups_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/groups_controller_test.rb
@@ -431,7 +431,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
end
test 'get contents with jobs and pipeline instances disabled' do
- Rails.configuration.disable_api_methods = ['jobs.index', 'pipeline_instances.index']
+ Rails.configuration.API["DisabledAPIs"] = ['jobs.index', 'pipeline_instances.index']
authorize_with :active
get :contents, params: {
@@ -444,7 +444,7 @@ class Arvados::V1::GroupsControllerTest < ActionController::TestCase
test 'get contents with low max_index_database_read' do
# Some result will certainly have at least 12 bytes in a
# restricted column
- Rails.configuration.max_index_database_read = 12
+ Rails.configuration.API["MaxIndexDatabaseRead"] = 12
authorize_with :active
get :contents, params: {
id: groups(:aproject).uuid,
diff --git a/services/api/test/functional/arvados/v1/jobs_controller_test.rb b/services/api/test/functional/arvados/v1/jobs_controller_test.rb
index fb81f2363..8bbe96687 100644
--- a/services/api/test/functional/arvados/v1/jobs_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/jobs_controller_test.rb
@@ -83,7 +83,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
# We need to verify that "cancel" creates a trigger file, so first
# let's make sure there is no stale trigger file.
begin
- File.unlink(Rails.configuration.crunch_refresh_trigger)
+ File.unlink(Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"])
rescue Errno::ENOENT
end
@@ -105,7 +105,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
'server should correct bogus cancelled_at ' +
job['cancelled_at'])
assert_equal(true,
- File.exist?(Rails.configuration.crunch_refresh_trigger),
+ File.exist?(Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"]),
'trigger file should be created when job is cancelled')
end
@@ -123,7 +123,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
# We need to verify that "cancel" creates a trigger file, so first
# let's make sure there is no stale trigger file.
begin
- File.unlink(Rails.configuration.crunch_refresh_trigger)
+ File.unlink(Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"])
rescue Errno::ENOENT
end
@@ -144,7 +144,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
# We need to verify that "cancel" creates a trigger file, so first
# let's make sure there is no stale trigger file.
begin
- File.unlink(Rails.configuration.crunch_refresh_trigger)
+ File.unlink(Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"])
rescue Errno::ENOENT
end
@@ -480,7 +480,7 @@ class Arvados::V1::JobsControllerTest < ActionController::TestCase
end
test 'jobs.create disabled in config' do
- Rails.configuration.disable_api_methods = ["jobs.create",
+ Rails.configuration.API["DisabledAPIs"] = ["jobs.create",
"pipeline_instances.create"]
authorize_with :active
post :create, params: {
diff --git a/services/api/test/functional/arvados/v1/nodes_controller_test.rb b/services/api/test/functional/arvados/v1/nodes_controller_test.rb
index 0beff6882..435764baa 100644
--- a/services/api/test/functional/arvados/v1/nodes_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/nodes_controller_test.rb
@@ -223,7 +223,7 @@ class Arvados::V1::NodesControllerTest < ActionController::TestCase
end
test "node should fail ping with invalid hostname config format" do
- Rails.configuration.assign_node_hostname = 'compute%<slot_number>04' # should end with "04d"
+ Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"] = 'compute%<slot_number>04' # should end with "04d"
post :ping, params: {
id: nodes(:new_with_no_hostname).uuid,
ping_secret: nodes(:new_with_no_hostname).info['ping_secret'],
diff --git a/services/api/test/functional/arvados/v1/repositories_controller_test.rb b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
index b810d6993..d986ff937 100644
--- a/services/api/test/functional/arvados/v1/repositories_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/repositories_controller_test.rb
@@ -200,15 +200,15 @@ class Arvados::V1::RepositoriesControllerTest < ActionController::TestCase
end
[
- {cfg: :git_repo_ssh_base, cfgval: "git at example.com:", match: %r"^git at example.com:"},
- {cfg: :git_repo_ssh_base, cfgval: true, match: %r"^git at git.zzzzz.arvadosapi.com:"},
- {cfg: :git_repo_ssh_base, cfgval: false, refute: /^git@/ },
- {cfg: :git_repo_https_base, cfgval: "https://example.com/", match: %r"^https://example.com/"},
- {cfg: :git_repo_https_base, cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"},
- {cfg: :git_repo_https_base, cfgval: false, refute: /^http/ },
+ {cfg: "Services.GitSSH.ExternalURL", cfgval: "git at example.com:", match: %r"^git at example.com:"},
+ {cfg: "Services.GitSSH.ExternalURL", cfgval: true, match: %r"^git at git.zzzzz.arvadosapi.com:"},
+ {cfg: "Services.GitSSH.ExternalURL", cfgval: false, refute: /^git@/ },
+ {cfg: "Services.GitHTTP.ExternalURL", cfgval: "https://example.com/", match: %r"^https://example.com/"},
+ {cfg: "Services.GitHTTP.ExternalURL", cfgval: true, match: %r"^https://git.zzzzz.arvadosapi.com/"},
+ {cfg: "Services.GitHTTP.ExternalURL", cfgval: false, refute: /^http/ },
].each do |expect|
test "set #{expect[:cfg]} to #{expect[:cfgval]}" do
- Rails.configuration.send expect[:cfg].to_s+"=", expect[:cfgval]
+ set_cfg Rails.configuration, expect[:cfg].to_s, expect[:cfgval]
authorize_with :active
get :index
assert_response :success
diff --git a/services/api/test/functional/arvados/v1/schema_controller_test.rb b/services/api/test/functional/arvados/v1/schema_controller_test.rb
index 53c1ed72e..9d4912805 100644
--- a/services/api/test/functional/arvados/v1/schema_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/schema_controller_test.rb
@@ -65,7 +65,7 @@ class Arvados::V1::SchemaControllerTest < ActionController::TestCase
end
test "non-empty disable_api_methods" do
- Rails.configuration.disable_api_methods =
+ Rails.configuration.API["DisabledAPIs"] =
['jobs.create', 'pipeline_instances.create', 'pipeline_templates.create']
get :index
assert_response :success
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
index 22a44a97a..66e32713e 100644
--- a/services/api/test/functional/arvados/v1/users_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/users_controller_test.rb
@@ -638,12 +638,12 @@ class Arvados::V1::UsersControllerTest < ActionController::TestCase
setup_email = ActionMailer::Base.deliveries.last
assert_not_nil setup_email, 'Expected email after setup'
- assert_equal Rails.configuration.user_notifier_email_from, setup_email.from[0]
+ assert_equal Rails.configuration.Users["UserNotifierEmailFrom"], setup_email.from[0]
assert_equal 'foo at example.com', setup_email.to[0]
assert_equal 'Welcome to Arvados - shell account enabled', setup_email.subject
assert (setup_email.body.to_s.include? 'Your Arvados shell account has been set up'),
'Expected Your Arvados shell account has been set up in email body'
- assert (setup_email.body.to_s.include? "#{Rails.configuration.workbench_address}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body'
+ assert (setup_email.body.to_s.include? "#{Rails.configuration.Services["Workbench1"]["ExternalURL"]}users/#{created['uuid']}/virtual_machines"), 'Expected virtual machines url in email body'
end
test "setup inactive user by changing is_active to true" do
diff --git a/services/api/test/helpers/git_test_helper.rb b/services/api/test/helpers/git_test_helper.rb
index 170b59ee1..7ad9bcad1 100644
--- a/services/api/test/helpers/git_test_helper.rb
+++ b/services/api/test/helpers/git_test_helper.rb
@@ -19,14 +19,14 @@ module GitTestHelper
def self.included base
base.setup do
# Extract the test repository data into the default test
- # environment's Rails.configuration.git_repositories_dir. (We
+ # environment's Rails.configuration.Git["Repositories"]. (We
# don't use that config setting here, though: it doesn't seem
# worth the risk of stepping on a real git repo root.)
@tmpdir = Rails.root.join 'tmp', 'git'
FileUtils.mkdir_p @tmpdir
system("tar", "-xC", @tmpdir.to_s, "-f", "test/test.git.tar")
- Rails.configuration.git_repositories_dir = "#{@tmpdir}/test"
- Rails.configuration.git_internal_dir = "#{@tmpdir}/internal.git"
+ Rails.configuration.Git["Repositories"] = "#{@tmpdir}/test"
+ Rails.configuration.Containers["JobsAPI"]["GitInternalDir"] = "#{@tmpdir}/internal.git"
end
base.teardown do
@@ -37,7 +37,7 @@ module GitTestHelper
end
def internal_tag tag
- IO.read "|git --git-dir #{Rails.configuration.git_internal_dir.shellescape} log --format=format:%H -n1 #{tag.shellescape}"
+ IO.read "|git --git-dir #{Rails.configuration.Containers["JobsAPI"]["GitInternalDir"].shellescape} log --format=format:%H -n1 #{tag.shellescape}"
end
# Intercept fetch_remote_repository and fetch from a specified url
diff --git a/services/api/test/integration/collections_api_test.rb b/services/api/test/integration/collections_api_test.rb
index 709657534..e82172b55 100644
--- a/services/api/test/integration/collections_api_test.rb
+++ b/services/api/test/integration/collections_api_test.rb
@@ -129,7 +129,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
test "store collection as json" do
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
}
signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
@@ -146,7 +146,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
test "store collection with manifest_text only" do
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
}
signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
@@ -163,7 +163,7 @@ class CollectionsApiTest < ActionDispatch::IntegrationTest
test "store collection then update name" do
signing_opts = {
- key: Rails.configuration.blob_signing_key,
+ key: Rails.configuration.Collections["BlobSigningKey"],
api_token: api_token(:active),
}
signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',
diff --git a/services/api/test/integration/groups_test.rb b/services/api/test/integration/groups_test.rb
index e45dd4eb5..9c2d023c0 100644
--- a/services/api/test/integration/groups_test.rb
+++ b/services/api/test/integration/groups_test.rb
@@ -194,7 +194,7 @@ class NonTransactionalGroupsTest < ActionDispatch::IntegrationTest
end
test "create request with async=true defers permissions update" do
- Rails.configuration.async_permissions_update_interval = 1 # second
+ Rails.configuration.API["AsyncPermissionsUpdateInterval"] = 1 # second
name = "Random group #{rand(1000)}"
assert_equal nil, Group.find_by_name(name)
diff --git a/services/api/test/integration/remote_user_test.rb b/services/api/test/integration/remote_user_test.rb
index 5c09cf1bc..644c2b1be 100644
--- a/services/api/test/integration/remote_user_test.rb
+++ b/services/api/test/integration/remote_user_test.rb
@@ -63,8 +63,8 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
ready.pop
@remote_server = srv
@remote_host = "127.0.0.1:#{srv.config[:Port]}"
- Rails.configuration.remote_hosts = Rails.configuration.remote_hosts.merge({'zbbbb' => @remote_host,
- 'zbork' => @remote_host})
+ Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({'zbbbb' => {"Host" => @remote_host},
+ 'zbork' => {"Host" => @remote_host}})
Arvados::V1::SchemaController.any_instance.stubs(:root_url).returns "https://#{@remote_host}"
@stub_status = 200
@stub_content = {
@@ -243,7 +243,7 @@ class RemoteUsersTest < ActionDispatch::IntegrationTest
end
test 'auto-activate user from trusted cluster' do
- Rails.configuration.auto_activate_users_from = ['zbbbb']
+ Rails.configuration.RemoteClusters['zbbbb']["ActivateUsers"] = true
get '/arvados/v1/users/current',
params: {format: 'json'},
headers: auth(remote: 'zbbbb')
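These tests rely on RemoteClusters now being a nested hash keyed by cluster ID rather than a flat host map. A minimal sketch of the shape being assumed (illustrative values only; the port number and the Proxy flag below are made up for the example, and real per-cluster entries carry more settings):

    REMOTE_CLUSTERS = {
      # wildcard entry consulted for remoteHostsViaDNS / proxy behavior
      "*"     => {"Proxy" => true},
      # per-cluster entry, as merged in by the test setup above
      "zbbbb" => {"Host" => "127.0.0.1:44789", "ActivateUsers" => true},
    }
    REMOTE_CLUSTERS.each { |id, c| puts "#{id}: #{c.inspect}" }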
diff --git a/services/api/test/integration/user_sessions_test.rb b/services/api/test/integration/user_sessions_test.rb
index f2dbaa506..e72562692 100644
--- a/services/api/test/integration/user_sessions_test.rb
+++ b/services/api/test/integration/user_sessions_test.rb
@@ -111,10 +111,10 @@ class UserSessionsApiTest < ActionDispatch::IntegrationTest
].each do |testcase|
test "user auto-activate #{testcase.inspect}" do
# Configure auto_setup behavior according to testcase[:cfg]
- Rails.configuration.auto_setup_new_users = testcase[:cfg][:auto]
- Rails.configuration.auto_setup_new_users_with_vm_uuid =
- (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : false)
- Rails.configuration.auto_setup_new_users_with_repository =
+ Rails.configuration.Users["AutoSetupNewUsers"] = testcase[:cfg][:auto]
+ Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"] =
+ (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : "")
+ Rails.configuration.Users["AutoSetupNewUsersWithRepository"] =
testcase[:cfg][:repo]
mock_auth_with(email: testcase[:email])
diff --git a/services/api/test/tasks/delete_old_container_logs_test.rb b/services/api/test/tasks/delete_old_container_logs_test.rb
index 45278ac1a..0403ceb2e 100644
--- a/services/api/test/tasks/delete_old_container_logs_test.rb
+++ b/services/api/test/tasks/delete_old_container_logs_test.rb
@@ -16,7 +16,7 @@ class DeleteOldContainerLogsTaskTest < ActiveSupport::TestCase
end
def run_with_expiry(clean_after)
- Rails.configuration.clean_container_log_rows_after = clean_after
+ Rails.configuration.Containers["Logging"]["MaxAge"] = clean_after
Rake::Task[TASK_NAME].reenable
Rake.application.invoke_task TASK_NAME
end
diff --git a/services/api/test/tasks/delete_old_job_logs_test.rb b/services/api/test/tasks/delete_old_job_logs_test.rb
index 4d4cdbc9e..6a793e1e0 100644
--- a/services/api/test/tasks/delete_old_job_logs_test.rb
+++ b/services/api/test/tasks/delete_old_job_logs_test.rb
@@ -16,7 +16,7 @@ class DeleteOldJobLogsTaskTest < ActiveSupport::TestCase
end
def run_with_expiry(clean_after)
- Rails.configuration.clean_job_log_rows_after = clean_after
+ Rails.configuration.Containers["Logging"]["MaxAge"] = clean_after
Rake::Task[TASK_NAME].reenable
Rake.application.invoke_task TASK_NAME
end
diff --git a/services/api/test/test_helper.rb b/services/api/test/test_helper.rb
index 939242cf8..e87a1c6d8 100644
--- a/services/api/test/test_helper.rb
+++ b/services/api/test/test_helper.rb
@@ -99,11 +99,8 @@ class ActiveSupport::TestCase
def restore_configuration
# Restore configuration settings changed during tests
- $application_config.each do |k,v|
- if k.match(/^[^.]*$/)
- Rails.configuration.send (k + '='), v
- end
- end
+ copy_into_config $arvados_config, Rails.configuration
+ copy_into_config $remaining_config, Rails.configuration
end
def set_user_from_auth(auth_name)
diff --git a/services/api/test/unit/blob_test.rb b/services/api/test/unit/blob_test.rb
index 429ebde97..ad083946d 100644
--- a/services/api/test/unit/blob_test.rb
+++ b/services/api/test/unit/blob_test.rb
@@ -130,14 +130,14 @@ class BlobTest < ActiveSupport::TestCase
expire: 0x7fffffff,
}
- original_ttl = Rails.configuration.blob_signature_ttl
- Rails.configuration.blob_signature_ttl = original_ttl*2
+ original_ttl = Rails.configuration.Collections["BlobSigningTTL"]
+ Rails.configuration.Collections["BlobSigningTTL"] = original_ttl*2
signed2 = Blob.sign_locator @@known_locator, {
api_token: @@known_token,
key: @@known_key,
expire: 0x7fffffff,
}
- Rails.configuration.blob_signature_ttl = original_ttl
+ Rails.configuration.Collections["BlobSigningTTL"] = original_ttl
assert_not_equal signed, signed2
end
diff --git a/services/api/test/unit/collection_test.rb b/services/api/test/unit/collection_test.rb
index 8deedee01..0ae9a0218 100644
--- a/services/api/test/unit/collection_test.rb
+++ b/services/api/test/unit/collection_test.rb
@@ -157,8 +157,8 @@ class CollectionTest < ActiveSupport::TestCase
end
test "auto-create version after idle setting" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 600 # 10 minutes
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 600 # 10 minutes
act_as_user users(:active) do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
@@ -188,8 +188,8 @@ class CollectionTest < ActiveSupport::TestCase
end
test "preserve_version=false assignment is ignored while being true and not producing a new version" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 3600
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 3600
act_as_user users(:active) do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
@@ -244,8 +244,8 @@ class CollectionTest < ActiveSupport::TestCase
end
test "uuid updates on current version make older versions update their pointers" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 0
act_as_system_user do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
@@ -267,8 +267,8 @@ class CollectionTest < ActiveSupport::TestCase
end
test "older versions' modified_at indicate when they're created" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 0
act_as_user users(:active) do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
@@ -301,8 +301,8 @@ class CollectionTest < ActiveSupport::TestCase
end
test "past versions should not be directly updatable" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 0
act_as_system_user do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
@@ -324,7 +324,7 @@ class CollectionTest < ActiveSupport::TestCase
assert c_old.invalid?
c_old.reload
# Now disable collection versioning, it should behave the same way
- Rails.configuration.collection_versioning = false
+ Rails.configuration.Collections["CollectionVersioning"] = false
c_old.name = 'this was foo'
assert c_old.invalid?
end
@@ -337,8 +337,8 @@ class CollectionTest < ActiveSupport::TestCase
['is_trashed', true, false],
].each do |attr, first_val, second_val|
test "sync #{attr} with older versions" do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 0
act_as_system_user do
# Set up initial collection
c = create_collection 'foo', Encoding::US_ASCII
@@ -379,8 +379,8 @@ class CollectionTest < ActiveSupport::TestCase
[false, 'replication_desired', 5, false],
].each do |versioning, attr, val, new_version_expected|
test "update #{attr} with versioning #{versioning ? '' : 'not '}enabled should #{new_version_expected ? '' : 'not '}create a new version" do
- Rails.configuration.collection_versioning = versioning
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections["CollectionVersioning"] = versioning
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 0
act_as_user users(:active) do
# Create initial collection
c = create_collection 'foo', Encoding::US_ASCII
@@ -414,8 +414,8 @@ class CollectionTest < ActiveSupport::TestCase
end
test 'current_version_uuid is ignored during update' do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 0
act_as_user users(:active) do
# Create 1st collection
col1 = create_collection 'foo', Encoding::US_ASCII
@@ -439,8 +439,8 @@ class CollectionTest < ActiveSupport::TestCase
end
test 'with versioning enabled, simultaneous updates increment version correctly' do
- Rails.configuration.collection_versioning = true
- Rails.configuration.preserve_version_if_idle = 0
+ Rails.configuration.Collections["CollectionVersioning"] = true
+ Rails.configuration.Collections["PreserveVersionIfIdle"] = 0
act_as_user users(:active) do
# Create initial collection
col = create_collection 'foo', Encoding::US_ASCII
@@ -654,7 +654,7 @@ class CollectionTest < ActiveSupport::TestCase
[0, 2, 4, nil].each do |ask|
test "set replication_desired to #{ask.inspect}" do
- Rails.configuration.default_collection_replication = 2
+ Rails.configuration.Collections["DefaultReplication"] = 2
act_as_user users(:active) do
c = collections(:replication_undesired_unconfirmed)
c.update_attributes replication_desired: ask
@@ -760,7 +760,7 @@ class CollectionTest < ActiveSupport::TestCase
name: 'foo',
trash_at: db_current_time + 1.years)
sig_exp = /\+A[0-9a-f]{40}\@([0-9]+)/.match(c.signed_manifest_text)[1].to_i
- expect_max_sig_exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+ expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections["BlobSigningTTL"]
assert_operator c.trash_at.to_i, :>, expect_max_sig_exp
assert_operator sig_exp.to_i, :<=, expect_max_sig_exp
end
@@ -849,7 +849,7 @@ class CollectionTest < ActiveSupport::TestCase
test test_name do
act_as_user users(:active) do
min_exp = (db_current_time +
- Rails.configuration.blob_signature_ttl.seconds)
+ Rails.configuration.Collections["BlobSigningTTL"].seconds)
if fixture_name == :expired_collection
# Fixture-finder shorthand doesn't find trashed collections
# because they're not in the default scope.
@@ -890,7 +890,7 @@ class CollectionTest < ActiveSupport::TestCase
end
test 'default trash interval > blob signature ttl' do
- Rails.configuration.default_trash_lifetime = 86400 * 21 # 3 weeks
+ Rails.configuration.Collections["DefaultTrashLifetime"] = 86400 * 21 # 3 weeks
start = db_current_time
act_as_user users(:active) do
c = Collection.create!(manifest_text: '', name: 'foo')
diff --git a/services/api/test/unit/commit_test.rb b/services/api/test/unit/commit_test.rb
index af365b19e..5ae8080e9 100644
--- a/services/api/test/unit/commit_test.rb
+++ b/services/api/test/unit/commit_test.rb
@@ -78,7 +78,7 @@ class CommitTest < ActiveSupport::TestCase
test 'tag_in_internal_repository creates and updates tags in internal.git' do
authorize_with :active
- gitint = "git --git-dir #{Rails.configuration.git_internal_dir}"
+ gitint = "git --git-dir #{Rails.configuration.Containers["JobsAPI"]["GitInternalDir"]}"
IO.read("|#{gitint} tag -d testtag 2>/dev/null") # "no such tag", fine
assert_match(/^fatal: /, IO.read("|#{gitint} show testtag 2>&1"))
refute $?.success?
@@ -88,7 +88,7 @@ class CommitTest < ActiveSupport::TestCase
end
def with_foo_repository
- Dir.chdir("#{Rails.configuration.git_repositories_dir}/#{repositories(:foo).uuid}") do
+ Dir.chdir("#{Rails.configuration.Git["Repositories"]}/#{repositories(:foo).uuid}") do
must_pipe("git checkout master 2>&1")
yield
end
@@ -107,7 +107,7 @@ class CommitTest < ActiveSupport::TestCase
must_pipe("git -c user.email=x at x -c user.name=X commit -m -")
end
Commit.tag_in_internal_repository 'active/foo', sha1, tag
- gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+ gitint = "git --git-dir #{Rails.configuration.Containers["JobsAPI"]["GitInternalDir"].shellescape}"
assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
assert $?.success?
end
@@ -123,7 +123,7 @@ class CommitTest < ActiveSupport::TestCase
must_pipe("git reset --hard HEAD^")
end
Commit.tag_in_internal_repository 'active/foo', sha1, tag
- gitint = "git --git-dir #{Rails.configuration.git_internal_dir.shellescape}"
+ gitint = "git --git-dir #{Rails.configuration.Containers["JobsAPI"]["GitInternalDir"].shellescape}"
assert_match(/^commit /, IO.read("|#{gitint} show #{tag.shellescape}"))
assert $?.success?
end
diff --git a/services/api/test/unit/container_request_test.rb b/services/api/test/unit/container_request_test.rb
index 5c4a56c2c..5d998a244 100644
--- a/services/api/test/unit/container_request_test.rb
+++ b/services/api/test/unit/container_request_test.rb
@@ -514,7 +514,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
test "Container.resolve_container_image(pdh)" do
set_user_from_auth :active
[[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|
- Rails.configuration.docker_image_formats = [ver]
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = [ver]
pdh = collections(coll).portable_data_hash
resolved = Container.resolve_container_image(pdh)
assert_equal resolved, pdh
@@ -535,12 +535,12 @@ class ContainerRequestTest < ActiveSupport::TestCase
test "allow unrecognized container when there are remote_hosts" do
set_user_from_auth :active
- Rails.configuration.remote_hosts = {"foooo" => "bar.com"}
+ Rails.configuration.RemoteClusters = {"foooo" => {"Host" => "bar.com"} }
Container.resolve_container_image('acbd18db4cc2f85cedef654fccc4a4d8+3')
end
test "migrated docker image" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v2']
add_docker19_migration_link
# Test that it returns only v2 images even though request is for v1 image.
@@ -558,7 +558,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
end
test "use unmigrated docker image" do
- Rails.configuration.docker_image_formats = ['v1']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v1']
add_docker19_migration_link
# Test that it returns only supported v1 images even though there is a
@@ -577,7 +577,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
end
test "incompatible docker image v1" do
- Rails.configuration.docker_image_formats = ['v1']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v1']
add_docker19_migration_link
# Don't return unsupported v2 image even if we ask for it directly.
@@ -590,7 +590,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
end
test "incompatible docker image v2" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v2']
# No migration link, don't return unsupported v1 image,
set_user_from_auth :active
@@ -836,7 +836,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
assert_not_nil(trash)
assert_not_nil(delete)
assert_in_delta(trash, now + 1.second, 10)
- assert_in_delta(delete, now + Rails.configuration.blob_signature_ttl.second, 10)
+ assert_in_delta(delete, now + Rails.configuration.Collections["BlobSigningTTL"].second, 10)
end
def check_output_ttl_1y(now, trash, delete)
@@ -884,7 +884,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
[false, ActiveRecord::RecordInvalid],
[true, nil],
].each do |preemptible_conf, expected|
- test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
+ test "having Rails.configuration.Containers["UsePreemptibleInstances"]=#{preemptible_conf}, create preemptible container request and verify #{expected}" do
sp = {"preemptible" => true}
common_attrs = {cwd: "test",
priority: 1,
@@ -892,7 +892,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
output_path: "test",
scheduling_parameters: sp,
mounts: {"test" => {"kind" => "json"}}}
- Rails.configuration.preemptible_instances = preemptible_conf
+ Rails.configuration.Containers["UsePreemptibleInstances"] = preemptible_conf
set_user_from_auth :active
cr = create_minimal_req!(common_attrs)
@@ -921,7 +921,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
scheduling_parameters: {"preemptible" => false},
mounts: {"test" => {"kind" => "json"}}}
- Rails.configuration.preemptible_instances = true
+ Rails.configuration.Containers["UsePreemptibleInstances"] = true
set_user_from_auth :active
if requesting_c
@@ -946,14 +946,14 @@ class ContainerRequestTest < ActiveSupport::TestCase
[false, 'zzzzz-dz642-runningcontainr', nil],
[false, nil, nil],
].each do |preemptible_conf, requesting_c, schedule_preemptible|
- test "having Rails.configuration.preemptible_instances=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
+ test "having Rails.configuration.Containers["UsePreemptibleInstances"]=#{preemptible_conf}, #{requesting_c.nil? ? 'non-':''}child CR should #{schedule_preemptible ? '':'not'} ask for preemptible instance by default" do
common_attrs = {cwd: "test",
priority: 1,
command: ["echo", "hello"],
output_path: "test",
mounts: {"test" => {"kind" => "json"}}}
- Rails.configuration.preemptible_instances = preemptible_conf
+ Rails.configuration.Containers["UsePreemptibleInstances"] = preemptible_conf
set_user_from_auth :active
if requesting_c
@@ -1017,7 +1017,7 @@ class ContainerRequestTest < ActiveSupport::TestCase
state: ContainerRequest::Committed,
mounts: {"test" => {"kind" => "json"}}}
set_user_from_auth :active
- Rails.configuration.preemptible_instances = true
+ Rails.configuration.Containers["UsePreemptibleInstances"] = true
cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do
create_minimal_req!(common_attrs)
diff --git a/services/api/test/unit/container_test.rb b/services/api/test/unit/container_test.rb
index 5ce3739a3..1711841b4 100644
--- a/services/api/test/unit/container_test.rb
+++ b/services/api/test/unit/container_test.rb
@@ -241,7 +241,7 @@ class ContainerTest < ActiveSupport::TestCase
end
test "find_reusable method should select higher priority queued container" do
- Rails.configuration.log_reuse_decisions = true
+ Rails.configuration.Containers["LogReuseDecisions"] = true
set_user_from_auth :active
common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{"var" => "queued"}})
c_low_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:1}))
@@ -511,7 +511,7 @@ class ContainerTest < ActiveSupport::TestCase
test "find_reusable with logging enabled" do
set_user_from_auth :active
- Rails.configuration.log_reuse_decisions = true
+ Rails.configuration.Containers["LogReuseDecisions"] = true
Rails.logger.expects(:info).at_least(3)
Container.find_reusable(REUSABLE_COMMON_ATTRS)
end
@@ -666,7 +666,7 @@ class ContainerTest < ActiveSupport::TestCase
end
test "Exceed maximum lock-unlock cycles" do
- Rails.configuration.max_container_dispatch_attempts = 3
+ Rails.configuration.Containers["MaxDispatchAttempts"] = 3
set_user_from_auth :active
c, cr = minimal_new
diff --git a/services/api/test/unit/crunch_dispatch_test.rb b/services/api/test/unit/crunch_dispatch_test.rb
index 42ef0d160..d2c0b124e 100644
--- a/services/api/test/unit/crunch_dispatch_test.rb
+++ b/services/api/test/unit/crunch_dispatch_test.rb
@@ -99,7 +99,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase
test 'override --cgroup-root with CRUNCH_CGROUP_ROOT' do
ENV['CRUNCH_CGROUP_ROOT'] = '/path/to/cgroup'
- Rails.configuration.crunch_job_wrapper = :none
+ Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"] = "none"
act_as_system_user do
j = Job.create(repository: 'active/foo',
script: 'hash',
@@ -140,7 +140,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase
test 'rate limit of partial line segments' do
act_as_system_user do
- Rails.configuration.crunch_log_partial_line_throttle_period = 1
+ Rails.configuration.Containers["Logging"]["LogPartialLineThrottlePeriod"] = 1
job = {}
job[:bytes_logged] = 0
@@ -197,7 +197,7 @@ class CrunchDispatchTest < ActiveSupport::TestCase
end
test 'scancel orphaned job nodes' do
- Rails.configuration.crunch_job_wrapper = :slurm_immediate
+ Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"] = "slurm_immediate"
act_as_system_user do
dispatch = CrunchDispatch.new
diff --git a/services/api/test/unit/fail_jobs_test.rb b/services/api/test/unit/fail_jobs_test.rb
index 3c7f9a909..01ed22060 100644
--- a/services/api/test/unit/fail_jobs_test.rb
+++ b/services/api/test/unit/fail_jobs_test.rb
@@ -40,8 +40,8 @@ class FailJobsTest < ActiveSupport::TestCase
end
test 'cancel slurm jobs' do
- Rails.configuration.crunch_job_wrapper = :slurm_immediate
- Rails.configuration.crunch_job_user = 'foobar'
+ Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"] = "slurm_immediate"
+ Rails.configuration.Containers["JobsAPI"]["CrunchJobUser"] = 'foobar'
fake_squeue = IO.popen("echo #{@job[:before_reboot].uuid}")
fake_scancel = IO.popen("true")
IO.expects(:popen).
@@ -55,7 +55,7 @@ class FailJobsTest < ActiveSupport::TestCase
end
test 'use reboot time' do
- Rails.configuration.crunch_job_wrapper = nil
+ Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"] = nil
@dispatch.expects(:open).once.with('/proc/stat').
returns open(Rails.root.join('test/fixtures/files/proc_stat'))
@dispatch.fail_jobs(before: 'reboot')
diff --git a/services/api/test/unit/job_test.rb b/services/api/test/unit/job_test.rb
index fcbd1722f..65725f330 100644
--- a/services/api/test/unit/job_test.rb
+++ b/services/api/test/unit/job_test.rb
@@ -90,7 +90,7 @@ class JobTest < ActiveSupport::TestCase
].each do |use_config|
test "Job with no Docker image uses default docker image when configuration is set #{use_config}" do
default_docker_image = collections(:docker_image)[:portable_data_hash]
- Rails.configuration.default_docker_image_for_jobs = default_docker_image if use_config
+ Rails.configuration.Containers["JobsAPI"]["DefaultDockerImage"] = default_docker_image if use_config
job = Job.new job_attrs
assert job.valid?, job.errors.full_messages.to_s
@@ -127,7 +127,7 @@ class JobTest < ActiveSupport::TestCase
'locator' => BAD_COLLECTION,
}.each_pair do |spec_type, image_spec|
test "Job validation fails with nonexistent Docker image #{spec_type}" do
- Rails.configuration.remote_hosts = {}
+ Rails.configuration.RemoteClusters = {}
job = Job.new job_attrs(runtime_constraints:
{'docker_image' => image_spec})
assert(job.invalid?, "nonexistent Docker image #{spec_type} was valid")
@@ -426,7 +426,7 @@ class JobTest < ActiveSupport::TestCase
end
test "use migrated docker image if requesting old-format image by tag" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v2']
add_docker19_migration_link
job = Job.create!(
job_attrs(
@@ -438,7 +438,7 @@ class JobTest < ActiveSupport::TestCase
end
test "use migrated docker image if requesting old-format image by pdh" do
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v2']
add_docker19_migration_link
job = Job.create!(
job_attrs(
@@ -455,7 +455,7 @@ class JobTest < ActiveSupport::TestCase
[:docker_image_1_12, :docker_image_1_12, :docker_image_1_12],
].each do |existing_image, request_image, expect_image|
test "if a #{existing_image} job exists, #{request_image} yields #{expect_image} after migration" do
- Rails.configuration.docker_image_formats = ['v1']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v1']
if existing_image == :docker_image
oldjob = Job.create!(
@@ -477,7 +477,7 @@ class JobTest < ActiveSupport::TestCase
end
end
- Rails.configuration.docker_image_formats = ['v2']
+ Rails.configuration.Containers["SupportedDockerImageFormats"] = ['v2']
add_docker19_migration_link
# Check that both v1 and v2 images get resolved to v2.
@@ -568,7 +568,7 @@ class JobTest < ActiveSupport::TestCase
end
test 'find_reusable with logging' do
- Rails.configuration.log_reuse_decisions = true
+ Rails.configuration.Containers["LogReuseDecisions"] = true
Rails.logger.expects(:info).at_least(3)
try_find_reusable
end
@@ -595,7 +595,7 @@ class JobTest < ActiveSupport::TestCase
assert_nil Job.find_reusable(example_attrs, {}, [], [users(:active)])
# ...unless config says to reuse the earlier job in such cases.
- Rails.configuration.reuse_job_if_outputs_differ = true
+ Rails.configuration.Containers["JobsAPI"]["ReuseJobIfOutputsDiffer"] = true
j = Job.find_reusable(example_attrs, {}, [], [users(:active)])
assert_equal foobar.uuid, j.uuid
end
@@ -648,33 +648,32 @@ class JobTest < ActiveSupport::TestCase
end
test 'enable legacy api configuration option = true' do
- Rails.configuration.enable_legacy_jobs_api = true
+ Rails.configuration.Containers["JobsAPI"]["Enable"] = "true"
check_enable_legacy_jobs_api
- assert_equal [], Rails.configuration.disable_api_methods
+ assert_equal [], Rails.configuration.API["DisabledAPIs"]
end
test 'enable legacy api configuration option = false' do
- Rails.configuration.enable_legacy_jobs_api = false
+ Rails.configuration.Containers["JobsAPI"]["Enable"] = "false"
check_enable_legacy_jobs_api
- assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+ assert_equal Disable_jobs_api_method_list, Rails.configuration.API["DisabledAPIs"]
end
test 'enable legacy api configuration option = auto, has jobs' do
- Rails.configuration.enable_legacy_jobs_api = "auto"
+ Rails.configuration.Containers["JobsAPI"]["Enable"] = "auto"
assert Job.count > 0
- assert_equal [], Rails.configuration.disable_api_methods
check_enable_legacy_jobs_api
- assert_equal [], Rails.configuration.disable_api_methods
+ assert_equal [], Rails.configuration.API["DisabledAPIs"]
end
test 'enable legacy api configuration option = auto, no jobs' do
- Rails.configuration.enable_legacy_jobs_api = "auto"
+ Rails.configuration.Containers["JobsAPI"]["Enable"] = "auto"
act_as_system_user do
Job.destroy_all
end
assert_equal 0, Job.count
assert_equal [], Rails.configuration.disable_api_methods
check_enable_legacy_jobs_api
- assert_equal Disable_jobs_api_method_list, Rails.configuration.disable_api_methods
+ assert_equal Disable_jobs_api_method_list, Rails.configuration.API["DisabledAPIs"]
end
end
diff --git a/services/api/test/unit/log_test.rb b/services/api/test/unit/log_test.rb
index 5a78f2523..3f6e278cf 100644
--- a/services/api/test/unit/log_test.rb
+++ b/services/api/test/unit/log_test.rb
@@ -282,7 +282,7 @@ class LogTest < ActiveSupport::TestCase
end
test "non-empty configuration.unlogged_attributes" do
- Rails.configuration.unlogged_attributes = ["manifest_text"]
+ Rails.configuration.AuditLogs["UnloggedAttributes"] = ["manifest_text"]
txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
act_as_system_user do
@@ -297,7 +297,7 @@ class LogTest < ActiveSupport::TestCase
end
test "empty configuration.unlogged_attributes" do
- Rails.configuration.unlogged_attributes = []
+ Rails.configuration.AuditLogs["UnloggedAttributes"] = []
txt = ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
act_as_system_user do
@@ -332,8 +332,8 @@ class LogTest < ActiveSupport::TestCase
test 'retain old audit logs with default settings' do
assert_no_logs_deleted do
AuditLogs.delete_old(
- max_age: Rails.configuration.max_audit_log_age,
- max_batch: Rails.configuration.max_audit_log_delete_batch)
+ max_age: Rails.configuration.AuditLogs["MaxAge"],
+ max_batch: Rails.configuration.AuditLogs["MaxDeleteBatch"])
end
end
@@ -362,8 +362,8 @@ class LogTest < ActiveSupport::TestCase
test 'delete old audit logs in thread' do
begin
- Rails.configuration.max_audit_log_age = 20
- Rails.configuration.max_audit_log_delete_batch = 100000
+ Rails.configuration.AuditLogs["MaxAge"] = 20
+ Rails.configuration.AuditLogs["MaxDeleteBatch"] = 100000
Rails.cache.delete 'AuditLogs'
initial_log_count = Log.unscoped.all.count + 1
act_as_system_user do
diff --git a/services/api/test/unit/node_test.rb b/services/api/test/unit/node_test.rb
index 4cb7a0a1b..7e6a01db6 100644
--- a/services/api/test/unit/node_test.rb
+++ b/services/api/test/unit/node_test.rb
@@ -34,8 +34,8 @@ class NodeTest < ActiveSupport::TestCase
end
test "dns_server_conf_template" do
- Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
- Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] = Rails.root.join 'tmp'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"] = Rails.root.join 'config', 'unbound.template'
conffile = Rails.root.join 'tmp', 'compute65535.conf'
File.unlink conffile rescue nil
assert Node.dns_server_update 'compute65535', '127.0.0.1'
@@ -44,8 +44,8 @@ class NodeTest < ActiveSupport::TestCase
end
test "dns_server_restart_command" do
- Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp'
- Rails.configuration.dns_server_reload_command = 'foobar'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] = Rails.root.join 'tmp'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"] = 'foobar'
restartfile = Rails.root.join 'tmp', 'restart.txt'
File.unlink restartfile rescue nil
assert Node.dns_server_update 'compute65535', '127.0.0.127'
@@ -54,14 +54,14 @@ class NodeTest < ActiveSupport::TestCase
end
test "dns_server_restart_command fail" do
- Rails.configuration.dns_server_conf_dir = Rails.root.join 'tmp', 'bogusdir'
- Rails.configuration.dns_server_reload_command = 'foobar'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] = Rails.root.join 'tmp', 'bogusdir'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"] = 'foobar'
refute Node.dns_server_update 'compute65535', '127.0.0.127'
end
test "dns_server_update_command with valid command" do
testfile = Rails.root.join('tmp', 'node_test_dns_server_update_command.txt')
- Rails.configuration.dns_server_update_command =
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] =
('echo -n "%{hostname} == %{ip_address}" >' +
testfile.to_s.shellescape)
assert Node.dns_server_update 'compute65535', '127.0.0.1'
@@ -70,23 +70,23 @@ class NodeTest < ActiveSupport::TestCase
end
test "dns_server_update_command with failing command" do
- Rails.configuration.dns_server_update_command = 'false %{hostname}'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] = 'false %{hostname}'
refute Node.dns_server_update 'compute65535', '127.0.0.1'
end
test "dns update with no commands/dirs configured" do
- Rails.configuration.dns_server_update_command = false
- Rails.configuration.dns_server_conf_dir = false
- Rails.configuration.dns_server_conf_template = 'ignored!'
- Rails.configuration.dns_server_reload_command = 'ignored!'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] = false
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] = false
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"] = 'ignored!'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"] = 'ignored!'
assert Node.dns_server_update 'compute65535', '127.0.0.127'
end
test "don't leave temp files behind if there's an error writing them" do
- Rails.configuration.dns_server_conf_template = Rails.root.join 'config', 'unbound.template'
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"] = Rails.root.join 'config', 'unbound.template'
Tempfile.any_instance.stubs(:puts).raises(IOError)
Dir.mktmpdir do |tmpdir|
- Rails.configuration.dns_server_conf_dir = tmpdir
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] = tmpdir
refute Node.dns_server_update 'compute65535', '127.0.0.127'
assert_empty Dir.entries(tmpdir).select{|f| File.file? f}
end
@@ -100,14 +100,14 @@ class NodeTest < ActiveSupport::TestCase
end
test "ping new node with no hostname and no config" do
- Rails.configuration.assign_node_hostname = false
+ Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"] = false
node = ping_node(:new_with_no_hostname, {})
refute_nil node.slot_number
assert_nil node.hostname
end
test "ping new node with zero padding config" do
- Rails.configuration.assign_node_hostname = 'compute%<slot_number>04d'
+ Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"] = 'compute%<slot_number>04d'
node = ping_node(:new_with_no_hostname, {})
slot_number = node.slot_number
refute_nil slot_number
@@ -121,7 +121,7 @@ class NodeTest < ActiveSupport::TestCase
end
test "ping node with hostname and no config and expect hostname unchanged" do
- Rails.configuration.assign_node_hostname = false
+ Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"] = false
node = ping_node(:new_with_custom_hostname, {})
assert_equal(23, node.slot_number)
assert_equal("custom1", node.hostname)
@@ -196,13 +196,13 @@ class NodeTest < ActiveSupport::TestCase
end
test 'run out of slots' do
- Rails.configuration.max_compute_nodes = 3
+ Rails.configuration.Containers["MaxComputeVMs"] = 3
act_as_system_user do
Node.destroy_all
(1..4).each do |i|
n = Node.create!
args = { ip: "10.0.0.#{i}", ping_secret: n.info['ping_secret'] }
- if i <= Rails.configuration.max_compute_nodes
+ if i <= Rails.configuration.Containers["MaxComputeVMs"]
n.ping(args)
else
assert_raises do
diff --git a/services/api/test/unit/repository_test.rb b/services/api/test/unit/repository_test.rb
index fa4c37f74..2f6235029 100644
--- a/services/api/test/unit/repository_test.rb
+++ b/services/api/test/unit/repository_test.rb
@@ -23,15 +23,15 @@ class RepositoryTest < ActiveSupport::TestCase
def default_git_url(repo_name, user_name=nil)
if user_name
"git at git.%s.arvadosapi.com:%s/%s.git" %
- [Rails.configuration.uuid_prefix, user_name, repo_name]
+ [Rails.configuration.ClusterID, user_name, repo_name]
else
"git at git.%s.arvadosapi.com:%s.git" %
- [Rails.configuration.uuid_prefix, repo_name]
+ [Rails.configuration.ClusterID, repo_name]
end
end
def assert_server_path(path_tail, repo_sym)
- assert_equal(File.join(Rails.configuration.git_repositories_dir, path_tail),
+ assert_equal(File.join(Rails.configuration.Git["Repositories"], path_tail),
repositories(repo_sym).server_path)
end
diff --git a/services/api/test/unit/user_notifier_test.rb b/services/api/test/unit/user_notifier_test.rb
index 008259c0b..b5688fcc6 100644
--- a/services/api/test/unit/user_notifier_test.rb
+++ b/services/api/test/unit/user_notifier_test.rb
@@ -14,12 +14,12 @@ class UserNotifierTest < ActionMailer::TestCase
assert_not_nil email
# Test the body of the sent email contains what we expect it to
- assert_equal Rails.configuration.user_notifier_email_from, email.from.first
+ assert_equal Rails.configuration.Users["UserNotifierEmailFrom"], email.from.first
assert_equal user.email, email.to.first
assert_equal 'Welcome to Arvados - shell account enabled', email.subject
assert (email.body.to_s.include? 'Your Arvados shell account has been set up'),
'Expected Your Arvados shell account has been set up in email body'
- assert (email.body.to_s.include? Rails.configuration.workbench_address),
+ assert (email.body.to_s.include? Rails.configuration.Services["Workbench1"]["ExternalURL"]),
'Expected workbench url in email body'
end
diff --git a/services/api/test/unit/user_test.rb b/services/api/test/unit/user_test.rb
index 67c410047..52333db8e 100644
--- a/services/api/test/unit/user_test.rb
+++ b/services/api/test/unit/user_test.rb
@@ -110,7 +110,7 @@ class UserTest < ActiveSupport::TestCase
end
test "new username set avoiding blacklist" do
- Rails.configuration.auto_setup_name_blacklist = ["root"]
+ Rails.configuration.Users["AutoSetupUsernameBlacklist"] = ["root"]
check_new_username_setting("root", "root2")
end
@@ -170,8 +170,8 @@ class UserTest < ActiveSupport::TestCase
assert_equal 0, @all_users.count, "No admin users should exist (except for the system user)"
end
- Rails.configuration.auto_admin_first_user = auto_admin_first_user_config
- Rails.configuration.auto_admin_user = auto_admin_user_config
+ Rails.configuration.Users["AutoAdminFirstUser"] = auto_admin_first_user_config
+ Rails.configuration.Users["AutoAdminUserWithEmail"] = auto_admin_user_config
# See if the foo user has is_admin
foo = User.new
@@ -384,15 +384,15 @@ class UserTest < ActiveSupport::TestCase
test "create new user with auto setup #{active} #{email} #{auto_setup_vm} #{auto_setup_repo}" do
set_user_from_auth :admin
- Rails.configuration.auto_setup_new_users = true
+ Rails.configuration.Users["AutoSetupNewUsers"] = true
if auto_setup_vm
- Rails.configuration.auto_setup_new_users_with_vm_uuid = virtual_machines(:testvm)['uuid']
+ Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"] = virtual_machines(:testvm)['uuid']
else
- Rails.configuration.auto_setup_new_users_with_vm_uuid = false
+ Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"] = ""
end
- Rails.configuration.auto_setup_new_users_with_repository = auto_setup_repo
+ Rails.configuration.Users["AutoSetupNewUsersWithRepository"] = auto_setup_repo
create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, expect_username
end
@@ -625,12 +625,12 @@ class UserTest < ActiveSupport::TestCase
end
def create_user_and_verify_setup_and_notifications (active, new_user_recipients, inactive_recipients, email, expect_username)
- Rails.configuration.new_user_notification_recipients = new_user_recipients
- Rails.configuration.new_inactive_user_notification_recipients = inactive_recipients
+ Rails.configuration.Users["NewUserNotificationRecipients"] = new_user_recipients
+ Rails.configuration.Users["NewInactiveUserNotificationRecipients"] = inactive_recipients
ActionMailer::Base.deliveries = []
- can_setup = (Rails.configuration.auto_setup_new_users and
+ can_setup = (Rails.configuration.Users["AutoSetupNewUsers"] and
(not expect_username.nil?))
expect_repo_name = "#{expect_username}/#{expect_username}"
prior_repo = Repository.where(name: expect_repo_name).first
@@ -643,21 +643,21 @@ class UserTest < ActiveSupport::TestCase
assert_equal(expect_username, user.username)
# check user setup
- verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+ verify_link_exists(Rails.configuration.Users["AutoSetupNewUsers"] || active,
groups(:all_users).uuid, user.uuid,
"permission", "can_read")
# Check for OID login link.
- verify_link_exists(Rails.configuration.auto_setup_new_users || active,
+ verify_link_exists(Rails.configuration.Users["AutoSetupNewUsers"] || active,
user.uuid, user.email, "permission", "can_login")
# Check for repository.
if named_repo = (prior_repo or
Repository.where(name: expect_repo_name).first)
verify_link_exists((can_setup and prior_repo.nil? and
- Rails.configuration.auto_setup_new_users_with_repository),
+ Rails.configuration.Users["AutoSetupNewUsersWithRepository"]),
named_repo.uuid, user.uuid, "permission", "can_manage")
end
# Check for VM login.
- if auto_vm_uuid = Rails.configuration.auto_setup_new_users_with_vm_uuid
+ if auto_vm_uuid = Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"]
verify_link_exists(can_setup, auto_vm_uuid, user.uuid,
"permission", "can_login", "username", expect_username)
end
@@ -666,17 +666,17 @@ class UserTest < ActiveSupport::TestCase
new_user_email = nil
new_inactive_user_email = nil
- new_user_email_subject = "#{Rails.configuration.email_subject_prefix}New user created notification"
- if Rails.configuration.auto_setup_new_users
+ new_user_email_subject = "#{Rails.configuration.Users["EmailSubjectPrefix"]}New user created notification"
+ if Rails.configuration.Users["AutoSetupNewUsers"]
new_user_email_subject = (expect_username or active) ?
- "#{Rails.configuration.email_subject_prefix}New user created and setup notification" :
- "#{Rails.configuration.email_subject_prefix}New user created, but not setup notification"
+ "#{Rails.configuration.Users["EmailSubjectPrefix"]}New user created and setup notification" :
+ "#{Rails.configuration.Users["EmailSubjectPrefix"]}New user created, but not setup notification"
end
ActionMailer::Base.deliveries.each do |d|
if d.subject == new_user_email_subject then
new_user_email = d
- elsif d.subject == "#{Rails.configuration.email_subject_prefix}New inactive user notification" then
+ elsif d.subject == "#{Rails.configuration.Users["EmailSubjectPrefix"]}New inactive user notification" then
new_inactive_user_email = d
end
end
@@ -685,7 +685,7 @@ class UserTest < ActiveSupport::TestCase
# if the new user email recipients config parameter is set
if not new_user_recipients.empty? then
assert_not_nil new_user_email, 'Expected new user email after setup'
- assert_equal Rails.configuration.user_notifier_email_from, new_user_email.from[0]
+ assert_equal Rails.configuration.Users["UserNotifierEmailFrom"], new_user_email.from[0]
assert_equal new_user_recipients, new_user_email.to[0]
assert_equal new_user_email_subject, new_user_email.subject
else
@@ -695,9 +695,9 @@ class UserTest < ActiveSupport::TestCase
if not active
if not inactive_recipients.empty? then
assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'
- assert_equal Rails.configuration.user_notifier_email_from, new_inactive_user_email.from[0]
+ assert_equal Rails.configuration.Users["UserNotifierEmailFrom"], new_inactive_user_email.from[0]
assert_equal inactive_recipients, new_inactive_user_email.to[0]
- assert_equal "#{Rails.configuration.email_subject_prefix}New inactive user notification", new_inactive_user_email.subject
+ assert_equal "#{Rails.configuration.Users["EmailSubjectPrefix"]}New inactive user notification", new_inactive_user_email.subject
else
assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'
end
commit 450b51a554050504d6b510c2c9c1adf463b937a0
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Fri Mar 22 17:34:12 2019 -0400
13996: More config migrations, refactor some code into config_loader.rb
ActiveSupport::Duration is serialized properly when dumping config.
Non-empty string checks now use !...empty?, because some configuration
parameters that could previously be either false or a string are now
always a string, and an empty string is truthy in Ruby.
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
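As a standalone illustration of the truthiness issue described above (the
variable name is made up, not taken from the patch):

    workbench_url = ""   # previously this setting could have been false

    # "" is truthy in Ruby, so a bare check still takes the branch even
    # though nothing is configured:
    puts "redirect" if workbench_url
    # The migrated checks use !...empty? instead, which skips the branch:
    puts "redirect" if !workbench_url.empty?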
diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index c22e2df1e..58c1c86dd 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -433,3 +433,7 @@ Clusters:
IssueReporterEmailTo: ""
SupportEmailAddress: ""
EmailFrom: ""
+ RemoteClusters:
+ "*":
+ Proxy: false
+ ActivateUsers: false
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index 95e10498b..ced2f1620 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -46,34 +46,27 @@ class Arvados::V1::SchemaController < ApplicationController
rootUrl: root_url,
servicePath: "arvados/v1/",
batchPath: "batch",
- uuidPrefix: Rails.application.config.uuid_prefix,
- defaultTrashLifetime: Rails.application.config.default_trash_lifetime,
- blobSignatureTtl: Rails.application.config.blob_signature_ttl,
- maxRequestSize: Rails.application.config.max_request_size,
- maxItemsPerResponse: Rails.application.config.max_items_per_response,
- dockerImageFormats: Rails.application.config.docker_image_formats,
- crunchLogBytesPerEvent: Rails.application.config.crunch_log_bytes_per_event,
- crunchLogSecondsBetweenEvents: Rails.application.config.crunch_log_seconds_between_events,
- crunchLogThrottlePeriod: Rails.application.config.crunch_log_throttle_period,
- crunchLogThrottleBytes: Rails.application.config.crunch_log_throttle_bytes,
- crunchLogThrottleLines: Rails.application.config.crunch_log_throttle_lines,
- crunchLimitLogBytesPerJob: Rails.application.config.crunch_limit_log_bytes_per_job,
- crunchLogPartialLineThrottlePeriod: Rails.application.config.crunch_log_partial_line_throttle_period,
- crunchLogUpdatePeriod: Rails.application.config.crunch_log_update_period,
- crunchLogUpdateSize: Rails.application.config.crunch_log_update_size,
- remoteHosts: Rails.configuration.remote_hosts,
- remoteHostsViaDNS: Rails.configuration.remote_hosts_via_dns,
- websocketUrl: Rails.application.config.websocket_address,
- workbenchUrl: Rails.application.config.workbench_address,
- keepWebServiceUrl: Rails.application.config.keep_web_service_url,
- gitUrl: case Rails.application.config.git_repo_https_base
- when false
- ''
- when true
- 'https://git.%s.arvadosapi.com/' % Rails.configuration.ClusterID
- else
- Rails.application.config.git_repo_https_base
- end,
+ uuidPrefix: Rails.application.config.ClusterID,
+ defaultTrashLifetime: Rails.application.config.Collections["DefaultTrashLifetime"],
+ blobSignatureTtl: Rails.application.config.Collections["BlobSigningTTL"],
+ maxRequestSize: Rails.application.config.API["MaxRequestSize"],
+ maxItemsPerResponse: Rails.application.config.API["MaxItemsPerResponse"],
+ dockerImageFormats: Rails.application.config.Containers["SupportedDockerImageFormats"],
+ crunchLogBytesPerEvent: Rails.application.config.Containers["Logging"]["LogBytesPerEvent"],
+ crunchLogSecondsBetweenEvents: Rails.application.config.Containers["Logging"]["LogSecondsBetweenEvents"],
+ crunchLogThrottlePeriod: Rails.application.config.Containers["Logging"]["LogThrottlePeriod"],
+ crunchLogThrottleBytes: Rails.application.config.Containers["Logging"]["LogThrottleBytes"],
+ crunchLogThrottleLines: Rails.application.config.Containers["Logging"]["LogThrottleLines"],
+ crunchLimitLogBytesPerJob: Rails.application.config.Containers["Logging"]["LimitLogBytesPerJob"],
+ crunchLogPartialLineThrottlePeriod: Rails.application.config.Containers["Logging"]["LogPartialLineThrottlePeriod"],
+ crunchLogUpdatePeriod: Rails.application.config.Containers["Logging"]["LogUpdatePeriod"],
+ crunchLogUpdateSize: Rails.application.config.Containers["Logging"]["LogUpdateSize"],
+ remoteHosts: Rails.application.config.RemoteClusters.map {|k,v| v["Host"]},
+ remoteHostsViaDNS: Rails.application.config.RemoteClusters["*"]["Proxy"],
+ websocketUrl: Rails.application.config.Services["Websocket"]["ExternalURL"],
+ workbenchUrl: Rails.application.config.Services["Workbench1"]["ExternalURL"],
+ keepWebServiceUrl: Rails.application.config.Services["WebDAV"]["ExternalURL"],
+ gitUrl: Rails.application.config.Services["GitHTTP"]["ExternalURL"],
parameters: {
alt: {
type: "string",
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
index 53ab6ccb9..24756b93c 100644
--- a/services/api/app/controllers/static_controller.rb
+++ b/services/api/app/controllers/static_controller.rb
@@ -12,7 +12,7 @@ class StaticController < ApplicationController
def home
respond_to do |f|
f.html do
- if Rails.configuration.Services["Workbench1"]["ExternalURL"]
+ if !Rails.configuration.Services["Workbench1"]["ExternalURL"].empty?
redirect_to Rails.configuration.Services["Workbench1"]["ExternalURL"]
else
render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
diff --git a/services/api/app/models/api_client_authorization.rb b/services/api/app/models/api_client_authorization.rb
index fc8ae5282..db99c0bc1 100644
--- a/services/api/app/models/api_client_authorization.rb
+++ b/services/api/app/models/api_client_authorization.rb
@@ -87,8 +87,8 @@ class ApiClientAuthorization < ArvadosModel
end
def self.remote_host(uuid_prefix:)
- Rails.configuration.remote_hosts[uuid_prefix] ||
- (Rails.configuration.remote_hosts_via_dns &&
+ Rails.configuration.RemoteClusters[uuid_prefix]["Host"] ||
+ (Rails.configuration.RemoteClusters["*"]["Proxy"] &&
uuid_prefix+".arvadosapi.com")
end
@@ -188,7 +188,7 @@ class ApiClientAuthorization < ArvadosModel
end
if Rails.configuration.Users["NewUsersAreActive"] ||
- Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
+ Rails.configuration.RemoteClusters[remote_user['uuid'][0..4]].andand["ActivateUsers"]
# Update is_active to whatever it is at the remote end
user.is_active = remote_user['is_active']
elsif !remote_user['is_active']
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index 536653fa1..aaae19f4b 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -518,7 +518,7 @@ class Collection < ArvadosModel
if loc = Keep::Locator.parse(search_term)
loc.strip_hints!
coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1)
- if coll_match.any? or Rails.configuration.remote_hosts.length == 0
+ if coll_match.any? or Rails.configuration.RemoteClusters.length <= 1
return get_compatible_images(readers, pattern, coll_match)
else
# Allow bare pdh that doesn't exist in the local database so
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index 9a99f7260..bf569a2d8 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -199,7 +199,8 @@ class Node < ArvadosModel
ptr_domain: ptr_domain,
}
- if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]
+ if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].empty? and
+ !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"].empty?)
tmpfile = nil
begin
begin
@@ -227,7 +228,7 @@ class Node < ArvadosModel
end
end
- if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"]
+ if !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"].empty?
cmd = Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] % template_vars
if not system cmd
logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}"
@@ -235,7 +236,8 @@ class Node < ArvadosModel
end
end
- if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"]
+ if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].empty? and
+ !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"].empty?)
restartfile = File.join(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], 'restart.txt')
begin
File.open(restartfile, 'w') do |f|
@@ -261,9 +263,9 @@ class Node < ArvadosModel
# At startup, make sure all DNS entries exist. Otherwise, slurmctld
# will refuse to start.
- if (Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and
- Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"] and
- Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"])
+ if (!Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"].empty? and
+ !Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"].empty? and
+ !Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"].empty?)
(0..Rails.configuration.Containers["MaxComputeVMs"]-1).each do |slot_number|
hostname = hostname_for_slot(slot_number)
diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb
index bde9d51d2..ac89ecf6b 100644
--- a/services/api/app/models/repository.rb
+++ b/services/api/app/models/repository.rb
@@ -98,16 +98,15 @@ class Repository < ArvadosModel
end
def ssh_clone_url
- _clone_url :git_repo_ssh_base, 'git@git.%s.arvadosapi.com:'
+ _clone_url Rails.configuration.Services["GitSSH"]["ExternalURL"], 'git@git.%s.arvadosapi.com:'
end
def https_clone_url
- _clone_url :git_repo_https_base, 'https://git.%s.arvadosapi.com/'
+ _clone_url Rails.configuration.Services["GitHTTP"]["ExternalURL"], 'https://git.%s.arvadosapi.com/'
end
def _clone_url config_var, default_base_fmt
- configured_base = Rails.configuration.send config_var
- return nil if configured_base == false
+ configured_base = config_var
prefix = new_record? ? Rails.configuration.ClusterID : uuid[0,5]
if prefix == Rails.configuration.ClusterID and configured_base != true
base = configured_base
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 49d3afe7b..9aa6fe2c9 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -364,7 +364,7 @@ class User < ArvadosModel
def check_auto_admin
return if self.uuid.end_with?('anonymouspublic')
if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and
- Rails.configuration.Users["AutoAdminUserWithEmail"] and self.email == Rails.configuration.Users["AutoAdminUserWithEmail"]) or
+ !Rails.configuration.Users["AutoAdminUserWithEmail"].empty? and self.email == Rails.configuration.Users["AutoAdminUserWithEmail"]) or
(User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and
Rails.configuration.Users["AutoAdminFirstUser"])
self.is_admin = true
diff --git a/services/api/app/views/admin_notifier/new_inactive_user.text.erb b/services/api/app/views/admin_notifier/new_inactive_user.text.erb
index 097412c25..fa8652b8a 100644
--- a/services/api/app/views/admin_notifier/new_inactive_user.text.erb
+++ b/services/api/app/views/admin_notifier/new_inactive_user.text.erb
@@ -7,10 +7,10 @@ A new user landed on the inactive user page:
<%= @user.full_name %> <<%= @user.email %>>
-<% if Rails.configuration.workbench_address -%>
+<% if Rails.configuration.Services["Workbench1"]["ExternalURL"] -%>
Please see workbench for more information:
- <%= Rails.configuration.workbench_address %>
+ <%= Rails.configuration.Services["Workbench1"]["ExternalURL"] %>
<% end -%>
Thanks,
diff --git a/services/api/app/views/admin_notifier/new_user.text.erb b/services/api/app/views/admin_notifier/new_user.text.erb
index 20a36afcb..a96dfdba9 100644
--- a/services/api/app/views/admin_notifier/new_user.text.erb
+++ b/services/api/app/views/admin_notifier/new_user.text.erb
@@ -14,10 +14,10 @@ A new user has been created<%=add_to_message%>:
This user is <%= @user.is_active ? '' : 'NOT ' %>active.
-<% if Rails.configuration.workbench_address -%>
+<% if Rails.configuration.Services["Workbench1"]["ExternalURL"] -%>
Please see workbench for more information:
- <%= Rails.configuration.workbench_address %>
+ <%= Rails.configuration.Services["Workbench1"]["ExternalURL"] %>
<% end -%>
Thanks,
diff --git a/services/api/app/views/user_notifier/account_is_setup.text.erb b/services/api/app/views/user_notifier/account_is_setup.text.erb
index ca7082774..417698c38 100644
--- a/services/api/app/views/user_notifier/account_is_setup.text.erb
+++ b/services/api/app/views/user_notifier/account_is_setup.text.erb
@@ -8,9 +8,9 @@ SPDX-License-Identifier: AGPL-3.0 %>
Hi there,
<% end -%>
-Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.workbench_address %>at
+Your Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.Services["Workbench1"]["ExternalURL"] %>at
- <%= Rails.configuration.workbench_address %><%= "/" if !Rails.configuration.workbench_address.end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
+ <%= Rails.configuration.Services["Workbench1"]["ExternalURL"] %><%= "/" if !Rails.configuration.Services["Workbench1"]["ExternalURL"].end_with?("/") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>
for connection instructions.
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 80198d2db..53439cec8 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -2,6 +2,8 @@
#
# SPDX-License-Identifier: AGPL-3.0
+require 'config_loader'
+
begin
# If secret_token.rb exists here, we need to load it first.
require_relative 'secret_token.rb'
@@ -39,37 +41,8 @@ $arvados_config = {}
end
end
-def set_cfg cfg, k, v
- # "foo.bar: baz" --> { config.foo.bar = baz }
- ks = k.split '.'
- k = ks.pop
- ks.each do |kk|
- cfg = cfg[kk]
- if cfg.nil?
- break
- end
- end
- if !cfg.nil?
- cfg[k] = v
- end
-end
-
-$config_migrate_map = {}
-$config_types = {}
-def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
- if migrate_from
- $config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
- set_cfg cfg, assign_to, v
- }
- end
- $config_types[assign_to] = configtype
-end
-
-module Boolean; end
-class TrueClass; include Boolean; end
-class FalseClass; include Boolean; end
-
-declare_config "ClusterID", String, :uuid_prefix
+declare_config "ClusterID", NonemptyString, :uuid_prefix
+declare_config "ManagementToken", NonemptyString, :ManagementToken
declare_config "Git.Repositories", String, :git_repositories_dir
declare_config "API.DisabledAPIs", Array, :disable_api_methods
declare_config "API.MaxRequestSize", Integer, :max_request_size
@@ -89,10 +62,10 @@ declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
-declare_config "Login.ProviderAppSecret", String, :sso_app_secret
-declare_config "Login.ProviderAppID", String, :sso_app_id
+declare_config "Login.ProviderAppSecret", NonemptyString, :sso_app_secret
+declare_config "Login.ProviderAppID", NonemptyString, :sso_app_id
declare_config "TLS.Insecure", Boolean, :sso_insecure
-declare_config "Services.SSO.ExternalURL", String, :sso_provider_url
+declare_config "Services.SSO.ExternalURL", NonemptyString, :sso_provider_url
declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
@@ -102,7 +75,7 @@ declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :def
declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
-declare_config "Collections.BlobSigningKey", String, :blob_signing_key
+declare_config "Collections.BlobSigningKey", NonemptyString, :blob_signing_key
declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
@@ -129,9 +102,7 @@ declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_s
declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
-declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) {
- set_cfg cfg, "Containers.JobsAPI.Enable", if v.is_a? Boolean then v.to_s else v end
-}
+declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) { set_cfg cfg, "Containers.JobsAPI.Enable", v.to_s }
declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
@@ -145,6 +116,20 @@ declare_config "Services.Websocket.ExternalURL", String, :websocket_address
declare_config "Services.WebDAV.ExternalURL", String, :keep_web_service_url
declare_config "Services.GitHTTP.ExternalURL", String, :git_repo_https_base
declare_config "Services.GitSSH.ExternalURL", String, :git_repo_ssh_base
+declare_config "RemoteClusters", Hash, :remote_hosts, ->(cfg, k, v) {
+ h = {}
+ v.each do |clusterid, host|
+ h[clusterid] = {
+ "Host" => host,
+ "Proxy" => true,
+ "Scheme" => "https",
+ "Insecure" => false,
+ "ActivateUsers" => false
+ }
+ end
+ set_cfg cfg, "RemoteClusters", h
+}
+declare_config "RemoteClusters.*.Proxy", Boolean, :remote_hosts_via_dns
application_config = {}
%w(application.default application).each do |cfgfile|
@@ -159,53 +144,18 @@ application_config = {}
end
end
-application_config.each do |k, v|
- if $config_migrate_map[k.to_sym]
- $config_migrate_map[k.to_sym].call $arvados_config, k, v
- else
- set_cfg $arvados_config, k, v
- end
-end
-
-duration_re = /(\d+(\.\d+)?)(ms|s|m|h)/
-
-$config_types.each do |cfgkey, cfgtype|
- cfg = $arvados_config
- k = cfgkey
- ks = k.split '.'
- k = ks.pop
- ks.each do |kk|
- cfg = cfg[kk]
- if cfg.nil?
- break
- end
- end
-
- if cfg.nil?
- raise "missing #{cfgkey}"
- end
+remainders = migrate_config application_config, $arvados_config
- if cfgtype == String and !cfg[k]
- cfg[k] = ""
- end
- if cfgtype == ActiveSupport::Duration
- if cfg[k].is_a? Integer
- cfg[k] = cfg[k].seconds
- elsif cfg[k].is_a? String
- mt = duration_re.match cfg[k]
- if !mt
- raise "#{cfgkey} not a valid duration: '#{cfg[k]}', accepted suffixes are ms, s, m, h"
- end
- multiplier = {ms: 0.001, s: 1, m: 60, h: 3600}
- cfg[k] = (Float(mt[1]) * multiplier[mt[3].to_sym]).seconds
+if application_config["auto_activate_users_from"]
+ application_config["auto_activate_users_from"].each do |cluster|
+ if $arvados_config["RemoteClusters"][cluster]
+ $arvados_config["RemoteClusters"][cluster]["ActivateUsers"] = true
end
end
-
- if !cfg[k].is_a? cfgtype
- raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
- end
end
+coercion_and_check $arvados_config
+
Server::Application.configure do
nils = []
$arvados_config.each do |k, v|
@@ -223,6 +173,10 @@ Server::Application.configure do
cfg.send "#{k}=", v
end
end
+ remainders.each do |k, v|
+ config.send "#{k}=", v
+ end
+
if !nils.empty?
raise <<EOS
Refusing to start in #{::Rails.env.to_s} mode with missing configuration.
diff --git a/services/api/lib/config_loader.rb b/services/api/lib/config_loader.rb
new file mode 100644
index 000000000..6bfbdd7a8
--- /dev/null
+++ b/services/api/lib/config_loader.rb
@@ -0,0 +1,134 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+module Psych
+ module Visitors
+ class YAMLTree < Psych::Visitors::Visitor
+ def visit_ActiveSupport_Duration o
+ seconds = o.to_i
+ outstr = ""
+ if seconds / 3600 > 0
+ outstr += "#{seconds / 3600}h"
+ seconds = seconds % 3600
+ end
+ if seconds / 60 > 0
+ outstr += "#{seconds / 60}m"
+ seconds = seconds % 60
+ end
+ if seconds > 0
+ outstr += "#{seconds}s"
+ end
+ if outstr == ""
+ outstr = "0s"
+ end
+ @emitter.scalar outstr, nil, nil, true, false, Nodes::Scalar::ANY
+ end
+ end
+ end
+end
+
+def set_cfg cfg, k, v
+ # "foo.bar: baz" --> { config.foo.bar = baz }
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
+ end
+ if !cfg.nil?
+ cfg[k] = v
+ end
+end
+
+$config_migrate_map = {}
+$config_types = {}
+def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
+ if migrate_from
+ $config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
+ set_cfg cfg, assign_to, v
+ }
+ end
+ $config_types[assign_to] = configtype
+end
+
+module Boolean; end
+class TrueClass; include Boolean; end
+class FalseClass; include Boolean; end
+
+class NonemptyString < String
+end
+
+def parse_duration durstr
+ duration_re = /(\d+(\.\d+)?)(s|m|h)/
+ dursec = 0
+ while durstr != ""
+ mt = duration_re.match durstr
+ if !mt
+ raise "#{cfgkey} not a valid duration: '#{cfg[k]}', accepted suffixes are s, m, h"
+ end
+ multiplier = {s: 1, m: 60, h: 3600}
+ dursec += (Float(mt[1]) * multiplier[mt[3].to_sym])
+ durstr = durstr[mt[0].length..-1]
+ end
+ return dursec.seconds
+end
+
+def migrate_config from_config, to_config
+ remainders = {}
+ from_config.each do |k, v|
+ if $config_migrate_map[k.to_sym]
+ $config_migrate_map[k.to_sym].call to_config, k, v
+ else
+ remainders[k] = v
+ end
+ end
+ remainders
+end
+
+def coercion_and_check check_cfg
+ $config_types.each do |cfgkey, cfgtype|
+ cfg = check_cfg
+ k = cfgkey
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
+ end
+
+ if cfg.nil?
+ raise "missing #{cfgkey}"
+ end
+
+ if cfgtype == String and !cfg[k]
+ cfg[k] = ""
+ end
+
+ if cfgtype == NonemptyString
+ if (!cfg[k] || cfg[k] == "")
+ raise "#{cfgkey} cannot be empty"
+ end
+ if cfg[k].is_a? String
+ next
+ end
+ end
+
+ if cfgtype == ActiveSupport::Duration
+ if cfg[k].is_a? Integer
+ cfg[k] = cfg[k].seconds
+ elsif cfg[k].is_a? String
+ cfg[k] = parse_duration cfg[k]
+ end
+ end
+
+ if !cfg[k].is_a? cfgtype
+ raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
+ end
+ end
+
+end
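For reference, a short standalone sketch of the h/m/s rendering that the
Psych visitor above applies when config is dumped to YAML (assumes
ActiveSupport is installed; the helper name format_duration is illustrative):

    require 'active_support/all'

    # Mirrors visit_ActiveSupport_Duration: largest-to-smallest units, "0s" fallback.
    def format_duration(duration)
      seconds = duration.to_i
      out = ""
      out << "#{seconds / 3600}h" if seconds / 3600 > 0
      seconds %= 3600
      out << "#{seconds / 60}m" if seconds / 60 > 0
      seconds %= 60
      out << "#{seconds}s" if seconds > 0
      out.empty? ? "0s" : out
    end

    puts format_duration(30.days)     # "720h"
    puts format_duration(90.minutes)  # "1h30m"
    puts format_duration(0.seconds)   # "0s"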
diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake
index 4532225a3..bc6deb4bb 100644
--- a/services/api/lib/tasks/config_dump.rake
+++ b/services/api/lib/tasks/config_dump.rake
@@ -5,6 +5,8 @@
namespace :config do
desc 'Show site configuration'
task dump: :environment do
- puts $arvados_config.to_yaml
+ cfg = { "Clusters" => {}}
+ cfg["Clusters"][$arvados_config["ClusterID"]] = $arvados_config.select {|k,v| k != "ClusterID"}
+ puts cfg.to_yaml
end
end
diff --git a/services/api/lib/tasks/delete_old_job_logs.rake b/services/api/lib/tasks/delete_old_job_logs.rake
index 327f663b2..6f195e954 100644
--- a/services/api/lib/tasks/delete_old_job_logs.rake
+++ b/services/api/lib/tasks/delete_old_job_logs.rake
@@ -9,7 +9,7 @@
namespace :db do
desc "Remove old job stderr entries from the logs table"
task delete_old_job_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Rails.configuration.Containers["Logging"]["MaxAge"]} seconds')"
+ delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Containers["Logging"]["MaxAge"]} seconds')"
ActiveRecord::Base.connection.execute(delete_sql)
end
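The config:dump change above wraps the flat $arvados_config hash under
Clusters keyed by the cluster ID, matching the layout of config.defaults.yml.
A rough sketch of the resulting output, with made-up values:

    require 'yaml'

    arvados_config = {          # stand-in for $arvados_config; values are made up
      "ClusterID" => "zzzzz",
      "API"       => { "MaxRequestSize" => 134217728 },
    }

    cfg = { "Clusters" => {} }
    cfg["Clusters"][arvados_config["ClusterID"]] =
      arvados_config.select { |k, _| k != "ClusterID" }
    puts cfg.to_yaml
    # ---
    # Clusters:
    #   zzzzz:
    #     API:
    #       MaxRequestSize: 134217728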
commit 09a1ecf507df8ca110e6620efeb3593bc0d90192
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Fri Mar 22 10:42:51 2019 -0400
13996: Parsing durations wip
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
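As context for the hunks below, a hedged sketch of how a duration string such
as "720h" maps to seconds with the same suffix table (the helper name is
illustrative; the real code wraps the result in .seconds to get an
ActiveSupport::Duration):

    DURATION_RE = /(\d+(\.\d+)?)(ms|s|m|h)/

    def duration_to_seconds(str)
      mt = DURATION_RE.match(str)
      raise "'#{str}' is not a valid duration, accepted suffixes are ms, s, m, h" if !mt
      multiplier = { ms: 0.001, s: 1, m: 60, h: 3600 }
      Float(mt[1]) * multiplier[mt[3].to_sym]
    end

    puts duration_to_seconds("720h")  # 2592000.0 (30 days, as in the MaxAge change below)
    puts duration_to_seconds("1.5s")  # 1.5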
diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index 70162ee5f..c22e2df1e 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -308,7 +308,7 @@ Clusters:
# containers that have been finished for at least this many seconds,
# and delete their stdout, stderr, arv-mount, crunch-run, and
# crunchstat logs from the logs table.
- MaxAge: 30d
+ MaxAge: 720h
# These two settings control how frequently log events are flushed to the
# database. Log lines are buffered until either crunch_log_bytes_per_event
@@ -388,12 +388,12 @@ Clusters:
AssignNodeHostname: "compute%<slot_number>d"
JobsAPI:
- # Enable the legacy Jobs API.
- # auto -- (default) enable the Jobs API only if it has been used before
+ # Enable the legacy Jobs API. This value must be a string.
+ # 'auto' -- (default) enable the Jobs API only if it has been used before
# (i.e., there are job records in the database)
- # true -- enable the Jobs API despite lack of existing records.
- # false -- disable the Jobs API despite presence of existing records.
- Enable: auto
+ # 'true' -- enable the Jobs API despite lack of existing records.
+ # 'false' -- disable the Jobs API despite presence of existing records.
+ Enable: 'auto'
# Git repositories must be readable by api server, or you won't be
# able to submit crunch jobs. To pass the test suites, put a clone
@@ -425,11 +425,11 @@ Clusters:
# original job reuse behavior, and is still the default).
ReuseJobIfOutputsDiffer: false
- Mail:
- MailchimpAPIKey: ""
- MailchimpListID: ""
- SendUserSetupNotificationEmail: ""
- IssueReporterEmailFrom: ""
- IssueReporterEmailTo: ""
- SupportEmailAddress: ""
- EmailFrom: ""
+ Mail:
+ MailchimpAPIKey: ""
+ MailchimpListID: ""
+ SendUserSetupNotificationEmail: ""
+ IssueReporterEmailFrom: ""
+ IssueReporterEmailTo: ""
+ SupportEmailAddress: ""
+ EmailFrom: ""
diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
index 66e09f671..aca066935 100644
--- a/services/api/config/application.default.yml
+++ b/services/api/config/application.default.yml
@@ -14,17 +14,6 @@
common:
- # When you run the db:delete_old_job_logs task, it will find jobs that
- # have been finished for at least this many seconds, and delete their
- # stderr logs from the logs table.
- clean_job_log_rows_after: <%= 30.days %>
-
- # When you run the db:delete_old_container_logs task, it will find
- # containers that have been finished for at least this many seconds,
- # and delete their stdout, stderr, arv-mount, crunch-run, and
- # crunchstat logs from the logs table.
- clean_container_log_rows_after: <%= 30.days %>
-
## Set Time.zone default to the specified zone and make Active
## Record auto-convert to this zone. Run "rake -D time" for a list
## of tasks for finding time zone names. Default is UTC.
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 0a99b1afc..80198d2db 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -56,9 +56,9 @@ end
$config_migrate_map = {}
$config_types = {}
-def declare_config(assign_to, configtype, migrate_from=nil)
+def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)
if migrate_from
- $config_migrate_map[migrate_from] = ->(cfg, k, v) {
+ $config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {
set_cfg cfg, assign_to, v
}
end
@@ -129,7 +129,9 @@ declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_s
declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
-declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api
+declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api, ->(cfg, k, v) {
+ set_cfg cfg, "Containers.JobsAPI.Enable", if v.is_a? Boolean then v.to_s else v end
+}
declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
@@ -165,6 +167,8 @@ application_config.each do |k, v|
end
end
+duration_re = /(\d+(\.\d+)?)(ms|s|m|h)/
+
$config_types.each do |cfgkey, cfgtype|
cfg = $arvados_config
k = cfgkey
@@ -176,6 +180,11 @@ $config_types.each do |cfgkey, cfgtype|
break
end
end
+
+ if cfg.nil?
+ raise "missing #{cfgkey}"
+ end
+
if cfgtype == String and !cfg[k]
cfg[k] = ""
end
@@ -183,14 +192,15 @@ $config_types.each do |cfgkey, cfgtype|
if cfg[k].is_a? Integer
cfg[k] = cfg[k].seconds
elsif cfg[k].is_a? String
- # TODO handle suffixes
+ mt = duration_re.match cfg[k]
+ if !mt
+ raise "#{cfgkey} not a valid duration: '#{cfg[k]}', accepted suffixes are ms, s, m, h"
+ end
+ multiplier = {ms: 0.001, s: 1, m: 60, h: 3600}
+ cfg[k] = (Float(mt[1]) * multiplier[mt[3].to_sym]).seconds
end
end
- if cfg.nil?
- raise "missing #{cfgkey}"
- end
-
if !cfg[k].is_a? cfgtype
raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
end
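A standalone sketch of the duration parsing introduced above, stripped of the config plumbing (parse_duration_seconds is a hypothetical helper name; the real code stores the result as an ActiveSupport::Duration via .seconds):
    DURATION_RE = /(\d+(\.\d+)?)(ms|s|m|h)/
    MULTIPLIER = {ms: 0.001, s: 1, m: 60, h: 3600}

    def parse_duration_seconds(str)
      mt = DURATION_RE.match(str)
      raise "not a valid duration: '#{str}', accepted suffixes are ms, s, m, h" if !mt
      Float(mt[1]) * MULTIPLIER[mt[3].to_sym]
    end

    parse_duration_seconds("720h")   # => 2592000.0  (the new MaxAge default, i.e. 30 days)
    parse_duration_seconds("1.5m")   # => 90.0
    parse_duration_seconds("30d")    # raises: 'd' is not an accepted suffix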
commit 47abf19591c9816f88f83db9fd2cbe93e2262e79
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Thu Mar 21 16:39:32 2019 -0400
13996: Migrate majority of defaults to config.defaults.yml
API server knows the types of config parameters (needed for type coercion,
also useful for type checking).
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index 53fc5d9cb..70162ee5f 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -1,5 +1,15 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
#
+# SPDX-License-Identifier: AGPL-3.0
+
+# Do not use this file for site configuration. Create
+# /etc/arvados/config.yml instead.
#
+# The order of precedence (highest to lowest):
+# 1. Legacy component-specific config files (deprecated)
+# 2. /etc/arvados/config.yml
+# 3. config.defaults.yml
+
Clusters:
xxxxx:
SystemRootToken: ""
@@ -8,6 +18,51 @@ Clusters:
# Server expects request header of the format "Authorization: Bearer xxx"
ManagementToken: ""
+ Services:
+ RailsAPI:
+ InternalURLs: {}
+ GitHTTP:
+ InternalURLs: {}
+ ExternalURL: ""
+ Keepstore:
+ InternalURLs: {}
+ Controller:
+ InternalURLs: {}
+ ExternalURL: ""
+ Websocket:
+ InternalURLs: {}
+ ExternalURL: ""
+ Keepbalance:
+ InternalURLs: {}
+ GitHTTP:
+ InternalURLs: {}
+ ExternalURL: ""
+ GitSSH:
+ ExternalURL: ""
+ DispatchCloud:
+ InternalURLs: {}
+ SSO:
+ ExternalURL: ""
+ Keepproxy:
+ InternalURLs: {}
+ ExternalURL: ""
+ WebDAV:
+ InternalURLs: {}
+ ExternalURL: ""
+ WebDAVDownload:
+ InternalURLs: {}
+ ExternalURL: ""
+ Keepstore:
+ InternalURLs: {}
+ Composer:
+ ExternalURL: ""
+ WebShell:
+ ExternalURL: ""
+ Workbench1:
+ InternalURLs: {}
+ ExternalURL: ""
+ Workbench2:
+ ExternalURL: ""
API:
# Maximum size (in bytes) allowed for a single API request. This
# limit is published in the discovery document for use by clients.
@@ -38,6 +93,11 @@ Clusters:
# Example: ["jobs.create", "pipeline_instances.create"]
DisabledAPIs: []
+ # Interval (seconds) between asynchronous permission view updates. Any
+ # permission-updating API called with the 'async' parameter schedules a an
+ # update on the permission view in the future, if not already scheduled.
+ AsyncPermissionsUpdateInterval: 20
+
Users:
# Config parameters to automatically setup new users. If enabled,
# this users will be able to self-activate. Enable this if you want
@@ -165,11 +225,6 @@ Clusters:
# arrived, and deleted if their delete_at time has arrived.
TrashSweepInterval: 60
- # Interval (seconds) between asynchronous permission view updates. Any
- # permission-updating API called with the 'async' parameter schedules a an
- # update on the permission view in the future, if not already scheduled.
- AsyncPermissionsUpdateInterval: 20
-
# If true, enable collection versioning.
# When a collection's preserve_version field is true or the current version
# is older than the amount of seconds defined on preserve_version_if_idle,
@@ -195,6 +250,9 @@ Clusters:
# {git_repositories_dir}/arvados/.git
Repositories: /var/lib/arvados/git/repositories
+ TLS:
+ Insecure: false
+
Containers:
# List of supported Docker Registry image formats that compute nodes
# are able to use. `arv keep docker` will error out if a user tries
@@ -327,7 +385,7 @@ Clusters:
# Example for compute0000, compute0001, ....:
# assign_node_hostname: compute%<slot_number>04d
# (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
- AssignNodeHostname: compute%<slot_number>d
+ AssignNodeHostname: "compute%<slot_number>d"
JobsAPI:
# Enable the legacy Jobs API.
@@ -346,8 +404,8 @@ Clusters:
# Docker image to be used when none found in runtime_constraints of a job
DefaultDockerImage: ""
- # :none or :slurm_immediate
- CrunchJobWrapper: :none
+ # none or slurm_immediate
+ CrunchJobWrapper: none
# username, or false = do not set uid when running jobs.
CrunchJobUser: crunch
@@ -368,10 +426,10 @@ Clusters:
ReuseJobIfOutputsDiffer: false
Mail:
- MailchimpAPIKey: # api-server/mailchimp_api_key
- MailchimpListID: # api-server/mailchimp_list_id
- SendUserSetupNotificationEmail: # workbench/send_user_setup_notification_email
- IssueReporterEmailFrom: # workbench/issue_reporter_email_from
- IssueReporterEmailTo: # workbench/issue_reporter_email_to
- SupportEmailAddress: # workbench/support_email_address
- EmailFrom: # workbench/email_from
+ MailchimpAPIKey: ""
+ MailchimpListID: ""
+ SendUserSetupNotificationEmail: ""
+ IssueReporterEmailFrom: ""
+ IssueReporterEmailTo: ""
+ SupportEmailAddress: ""
+ EmailFrom: ""
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index 78fea32b2..1ec921b8a 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -53,8 +53,6 @@ class ApplicationController < ActionController::Base
before_action(:render_404_if_no_object,
except: [:index, :create] + ERROR_ACTIONS)
- theme Rails.configuration.arvados_theme
-
attr_writer :resource_attrs
begin
@@ -83,15 +81,10 @@ class ApplicationController < ActionController::Base
def default_url_options
options = {}
- if Rails.configuration.host
- options[:host] = Rails.configuration.host
- end
- if Rails.configuration.port
- options[:port] = Rails.configuration.port
- end
- if Rails.configuration.protocol
- options[:protocol] = Rails.configuration.protocol
- end
+ exturl = URI.parse(Rails.configuration.Services["Controller"]["ExternalURL"])
+ options[:host] = exturl.host
+ options[:port] = exturl.port
+ options[:protocol] = exturl.scheme
options
end
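The rewritten default_url_options derives host, port, and protocol from the Controller ExternalURL; a quick sketch of that parsing with a hypothetical URL:
    require 'uri'

    exturl = URI.parse("https://zzzzz.arvadosapi.com:8443")
    exturl.host    # => "zzzzz.arvadosapi.com"
    exturl.port    # => 8443
    exturl.scheme  # => "https"

    # With the config default of "", URI.parse returns a blank URI whose host,
    # port, and scheme are all nil, so the options hash is populated with nils
    # instead of being left empty as the old host/port/protocol checks did.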
diff --git a/services/api/app/controllers/static_controller.rb b/services/api/app/controllers/static_controller.rb
index b421f5459..53ab6ccb9 100644
--- a/services/api/app/controllers/static_controller.rb
+++ b/services/api/app/controllers/static_controller.rb
@@ -12,8 +12,8 @@ class StaticController < ApplicationController
def home
respond_to do |f|
f.html do
- if Rails.configuration.workbench_address
- redirect_to Rails.configuration.workbench_address
+ if Rails.configuration.Services["Workbench1"]["ExternalURL"]
+ redirect_to Rails.configuration.Services["Workbench1"]["ExternalURL"]
else
render_not_found "Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead."
end
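One behavioural difference worth noting here: the legacy workbench_address defaulted to false, while Services.Workbench1.ExternalURL defaults to "", and an empty string is truthy in Ruby, so the guard above no longer short-circuits on an unconfigured value. A tiny illustration:
    !!false  # => false  (old default: fall through to render_not_found)
    !!""     # => true   (new default: the redirect branch is taken)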
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
index c5d3ae74f..cab544263 100644
--- a/services/api/app/controllers/user_sessions_controller.rb
+++ b/services/api/app/controllers/user_sessions_controller.rb
@@ -120,7 +120,7 @@ class UserSessionsController < ApplicationController
flash[:notice] = 'You have logged off'
return_to = params[:return_to] || root_url
- redirect_to "#{Rails.configuration.sso_provider_url}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
+ redirect_to "#{Rails.configuration.Services["SSO"]["ExternalURL"]}/users/sign_out?redirect_uri=#{CGI.escape return_to}"
end
# login - Just bounce to /auth/joshid. The only purpose of this function is
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index 3c4712fde..9a99f7260 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -203,7 +203,7 @@ class Node < ArvadosModel
tmpfile = nil
begin
begin
- template = IO.read(Rails.configuration.Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"])
+ template = IO.read(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"])
rescue IOError, SystemCallError => e
logger.error "Reading #{Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]}: #{e.message}"
raise
diff --git a/services/api/config/application.default.yml b/services/api/config/application.default.yml
index 98443b428..66e09f671 100644
--- a/services/api/config/application.default.yml
+++ b/services/api/config/application.default.yml
@@ -13,216 +13,6 @@
# 5. Section in application.default.yml called "common"
common:
- ###
- ### Essential site configuration
- ###
-
- # The prefix used for all database identifiers to identify the record as
- # originating from this site. Must be exactly 5 alphanumeric characters
- # (lowercase ASCII letters and digits).
- uuid_prefix: ~
-
- # secret_token is a string of alphanumeric characters used by Rails
- # to sign session tokens. IMPORTANT: This is a site secret. It
- # should be at least 50 characters.
- secret_token: ~
-
- # blob_signing_key is a string of alphanumeric characters used to
- # generate permission signatures for Keep locators. It must be
- # identical to the permission key given to Keep. IMPORTANT: This is
- # a site secret. It should be at least 50 characters.
- #
- # Modifying blob_signing_key will invalidate all existing
- # signatures, which can cause programs to fail (e.g., arv-put,
- # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
- # no such processes are running.
- blob_signing_key: ~
-
- # These settings are provided by your OAuth2 provider (e.g.,
- # sso-provider).
- sso_app_secret: ~
- sso_app_id: ~
- sso_provider_url: ~
-
- # If this is not false, HTML requests at the API server's root URL
- # are redirected to this location, and it is provided in the text of
- # user activation notification email messages to remind them where
- # to log in.
- workbench_address: false
-
- # Client-facing URI for websocket service. Nginx should be
- # configured to proxy this URI to arvados-ws; see
- # http://doc.arvados.org/install/install-ws.html
- #
- # If websocket_address is false (which is the default), no websocket
- # server will be advertised to clients. This configuration is not
- # supported.
- #
- # Example:
- #websocket_address: wss://ws.zzzzz.arvadosapi.com/websocket
- websocket_address: false
-
- # Maximum number of websocket connections allowed
- websocket_max_connections: 500
-
- # Maximum number of events a single connection can be backlogged
- websocket_max_notify_backlog: 1000
-
- # Maximum number of subscriptions a single websocket connection can have
- # active.
- websocket_max_filters: 10
-
- # Git repositories must be readable by api server, or you won't be
- # able to submit crunch jobs. To pass the test suites, put a clone
- # of the arvados tree in {git_repositories_dir}/arvados.git or
- # {git_repositories_dir}/arvados/.git
- git_repositories_dir: /var/lib/arvados/git/repositories
-
- # This is a (bare) repository that stores commits used in jobs. When a job
- # runs, the source commits are first fetched into this repository, then this
- # repository is used to deploy to compute nodes. This should NOT be a
- # subdirectory of {git_repositiories_dir}.
- git_internal_dir: /var/lib/arvados/internal.git
-
- # Default replication level for collections. This is used when a
- # collection's replication_desired attribute is nil.
- default_collection_replication: 2
-
-
- ###
- ### Overriding default advertised hostnames/URLs
- ###
-
- # If not false, this is the hostname, port, and protocol that will be used
- # for root_url and advertised in the discovery document. By default, use
- # the default Rails logic for deciding on a hostname.
- host: false
- port: false
- protocol: false
-
- # Base part of SSH git clone url given with repository resources. If
- # true, the default "git at git.(uuid_prefix).arvadosapi.com:" is
- # used. If false, SSH clone URLs are not advertised. Include a
- # trailing ":" or "/" if needed: it will not be added automatically.
- git_repo_ssh_base: true
-
- # Base part of HTTPS git clone urls given with repository
- # resources. This is expected to be an arv-git-httpd service which
- # accepts API tokens as HTTP-auth passwords. If true, the default
- # "https://git.(uuid_prefix).arvadosapi.com/" is used. If false,
- # HTTPS clone URLs are not advertised. Include a trailing ":" or "/"
- # if needed: it will not be added automatically.
- git_repo_https_base: true
-
-
- ###
- ### New user and & email settings
- ###
-
- # Config parameters to automatically setup new users. If enabled,
- # this users will be able to self-activate. Enable this if you want
- # to run an open instance where anyone can create an account and use
- # the system without requiring manual approval.
- #
- # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
- # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
- auto_setup_new_users: false
- auto_setup_new_users_with_vm_uuid: false
- auto_setup_new_users_with_repository: false
- auto_setup_name_blacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
-
- # When new_users_are_active is set to true, new users will be active
- # immediately. This skips the "self-activate" step which enforces
- # user agreements. Should only be enabled for development.
- new_users_are_active: false
-
- # The e-mail address of the user you would like to become marked as an admin
- # user on their first login.
- # In the default configuration, authentication happens through the Arvados SSO
- # server, which uses OAuth2 against Google's servers, so in that case this
- # should be an address associated with a Google account.
- auto_admin_user: false
-
- # If auto_admin_first_user is set to true, the first user to log in when no
- # other admin users exist will automatically become an admin user.
- auto_admin_first_user: false
-
- # Email address to notify whenever a user creates a profile for the
- # first time
- user_profile_notification_address: false
-
- admin_notifier_email_from: arvados at example.com
- email_subject_prefix: "[ARVADOS] "
- user_notifier_email_from: arvados at example.com
- new_user_notification_recipients: [ ]
- new_inactive_user_notification_recipients: [ ]
-
-
- ###
- ### Limits, timeouts and durations
- ###
-
- # Lifetime (in seconds) of blob permission signatures generated by
- # the API server. This determines how long a client can take (after
- # retrieving a collection record) to retrieve the collection data
- # from Keep. If the client needs more time than that (assuming the
- # collection still has the same content and the relevant user/token
- # still has permission) the client can retrieve the collection again
- # to get fresh signatures.
- #
- # This must be exactly equal to the -blob-signature-ttl flag used by
- # keepstore servers. Otherwise, reading data blocks and saving
- # collections will fail with HTTP 403 permission errors.
- #
- # Modifying blob_signature_ttl invalidates existing signatures; see
- # blob_signing_key note above.
- #
- # The default is 2 weeks.
- blob_signature_ttl: 1209600
-
- # Default lifetime for ephemeral collections: 2 weeks. This must not
- # be less than blob_signature_ttl.
- default_trash_lifetime: 1209600
-
- # Interval (seconds) between trash sweeps. During a trash sweep,
- # collections are marked as trash if their trash_at time has
- # arrived, and deleted if their delete_at time has arrived.
- trash_sweep_interval: 60
-
- # Interval (seconds) between asynchronous permission view updates. Any
- # permission-updating API called with the 'async' parameter schedules a an
- # update on the permission view in the future, if not already scheduled.
- async_permissions_update_interval: 20
-
- # Maximum characters of (JSON-encoded) query parameters to include
- # in each request log entry. When params exceed this size, they will
- # be JSON-encoded, truncated to this size, and logged as
- # params_truncated.
- max_request_log_params_size: 2000
-
- # Maximum size (in bytes) allowed for a single API request. This
- # limit is published in the discovery document for use by clients.
- # Note: You must separately configure the upstream web server or
- # proxy to actually enforce the desired maximum request size on the
- # server side.
- max_request_size: 134217728
-
- # Limit the number of bytes read from the database during an index
- # request (by retrieving and returning fewer rows than would
- # normally be returned in a single response).
- # Note 1: This setting never reduces the number of returned rows to
- # zero, no matter how big the first data row is.
- # Note 2: Currently, this is only checked against a specific set of
- # columns that tend to get large (collections.manifest_text,
- # containers.mounts, workflows.definition). Other fields (e.g.,
- # "properties" hashes) are not counted against this limit.
- max_index_database_read: 134217728
-
- # Maximum number of items to return when responding to a APIs that
- # can return partial result sets using limit and offset parameters
- # (e.g., *.index, groups.contents). If a request specifies a "limit"
- # parameter higher than this value, this value is used instead.
- max_items_per_response: 1000
# When you run the db:delete_old_job_logs task, it will find jobs that
# have been finished for at least this many seconds, and delete their
@@ -235,229 +25,6 @@ common:
# crunchstat logs from the logs table.
clean_container_log_rows_after: <%= 30.days %>
- # Time to keep audit logs, in seconds. (An audit log is a row added
- # to the "logs" table in the PostgreSQL database each time an
- # Arvados object is created, modified, or deleted.)
- #
- # Currently, websocket event notifications rely on audit logs, so
- # this should not be set lower than 600 (5 minutes).
- max_audit_log_age: 1209600
-
- # Maximum number of log rows to delete in a single SQL transaction.
- #
- # If max_audit_log_delete_batch is 0, log entries will never be
- # deleted by Arvados. Cleanup can be done by an external process
- # without affecting any Arvados system processes, as long as very
- # recent (<5 minutes old) logs are not deleted.
- #
- # 100000 is a reasonable batch size for most sites.
- max_audit_log_delete_batch: 0
-
- # The maximum number of compute nodes that can be in use simultaneously
- # If this limit is reduced, any existing nodes with slot number >= new limit
- # will not be counted against the new limit. In other words, the new limit
- # won't be strictly enforced until those nodes with higher slot numbers
- # go down.
- max_compute_nodes: 64
-
- # These two settings control how frequently log events are flushed to the
- # database. Log lines are buffered until either crunch_log_bytes_per_event
- # has been reached or crunch_log_seconds_between_events has elapsed since
- # the last flush.
- crunch_log_bytes_per_event: 4096
- crunch_log_seconds_between_events: 1
-
- # The sample period for throttling logs, in seconds.
- crunch_log_throttle_period: 60
-
- # Maximum number of bytes that job can log over crunch_log_throttle_period
- # before being silenced until the end of the period.
- crunch_log_throttle_bytes: 65536
-
- # Maximum number of lines that job can log over crunch_log_throttle_period
- # before being silenced until the end of the period.
- crunch_log_throttle_lines: 1024
-
- # Maximum bytes that may be logged by a single job. Log bytes that are
- # silenced by throttling are not counted against this total.
- crunch_limit_log_bytes_per_job: 67108864
-
- crunch_log_partial_line_throttle_period: 5
-
- # Container logs are written to Keep and saved in a collection,
- # which is updated periodically while the container runs. This
- # value sets the interval (given in seconds) between collection
- # updates.
- crunch_log_update_period: 1800
-
- # The log collection is also updated when the specified amount of
- # log data (given in bytes) is produced in less than one update
- # period.
- crunch_log_update_size: 33554432
-
- # Attributes to suppress in events and audit logs. Notably,
- # specifying ["manifest_text"] here typically makes the database
- # smaller and faster.
- #
- # Warning: Using any non-empty value here can have undesirable side
- # effects for any client or component that relies on event logs.
- # Use at your own risk.
- unlogged_attributes: []
-
- # API methods to disable. Disabled methods are not listed in the
- # discovery document, and respond 404 to all requests.
- # Example: ["jobs.create", "pipeline_instances.create"]
- disable_api_methods: []
-
- # Enable the legacy Jobs API.
- # auto -- (default) enable the Jobs API only if it has been used before
- # (i.e., there are job records in the database)
- # true -- enable the Jobs API despite lack of existing records.
- # false -- disable the Jobs API despite presence of existing records.
- enable_legacy_jobs_api: auto
-
- ###
- ### Crunch, DNS & compute node management
- ###
-
- # Preemptible instance support (e.g. AWS Spot Instances)
- # When true, child containers will get created with the preemptible
- # scheduling parameter parameter set.
- preemptible_instances: false
-
- # Docker image to be used when none found in runtime_constraints of a job
- default_docker_image_for_jobs: false
-
- # List of supported Docker Registry image formats that compute nodes
- # are able to use. `arv keep docker` will error out if a user tries
- # to store an image with an unsupported format. Use an empty array
- # to skip the compatibility check (and display a warning message to
- # that effect).
- #
- # Example for sites running docker < 1.10: ["v1"]
- # Example for sites running docker >= 1.10: ["v2"]
- # Example for disabling check: []
- docker_image_formats: ["v2"]
-
- # :none or :slurm_immediate
- crunch_job_wrapper: :none
-
- # username, or false = do not set uid when running jobs.
- crunch_job_user: crunch
-
- # The web service must be able to create/write this file, and
- # crunch-job must be able to stat() it.
- crunch_refresh_trigger: /tmp/crunch_refresh_trigger
-
- # Path to dns server configuration directory
- # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
- # files or touch restart.txt (see below).
- dns_server_conf_dir: false
-
- # Template file for the dns server host snippets. See
- # unbound.template in this directory for an example. If false, do
- # not write any config files.
- dns_server_conf_template: false
-
- # String to write to {dns_server_conf_dir}/restart.txt (with a
- # trailing newline) after updating local data. If false, do not
- # open or write the restart.txt file.
- dns_server_reload_command: false
-
- # Command to run after each DNS update. Template variables will be
- # substituted; see the "unbound" example below. If false, do not run
- # a command.
- dns_server_update_command: false
-
- ## Example for unbound:
- #dns_server_conf_dir: /etc/unbound/conf.d
- #dns_server_conf_template: /path/to/your/api/server/config/unbound.template
- ## ...plus one of the following two methods of reloading:
- #dns_server_reload_command: unbound-control reload
- #dns_server_update_command: echo %{hostname} %{hostname}.%{uuid_prefix} %{hostname}.%{uuid_prefix}.arvadosapi.com %{ptr_domain} | xargs -n 1 unbound-control local_data_remove && unbound-control local_data %{hostname} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix} IN A %{ip_address} && unbound-control local_data %{hostname}.%{uuid_prefix}.arvadosapi.com IN A %{ip_address} && unbound-control local_data %{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com
-
- compute_node_domain: false
- compute_node_nameservers:
- - 192.168.1.1
-
- # Hostname to assign to a compute node when it sends a "ping" and the
- # hostname in its Node record is nil.
- # During bootstrapping, the "ping" script is expected to notice the
- # hostname given in the ping response, and update its unix hostname
- # accordingly.
- # If false, leave the hostname alone (this is appropriate if your compute
- # nodes' hostnames are already assigned by some other mechanism).
- #
- # One way or another, the hostnames of your node records should agree
- # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
- #
- # Example for compute0000, compute0001, ....:
- # assign_node_hostname: compute%<slot_number>04d
- # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
- assign_node_hostname: compute%<slot_number>d
-
-
- ###
- ### Job and container reuse logic.
- ###
-
- # Include details about job reuse decisions in the server log. This
- # causes additional database queries to run, so it should not be
- # enabled unless you expect to examine the resulting logs for
- # troubleshooting purposes.
- log_reuse_decisions: false
-
- # Control job reuse behavior when two completed jobs match the
- # search criteria and have different outputs.
- #
- # If true, in case of a conflict, reuse the earliest job (this is
- # similar to container reuse behavior).
- #
- # If false, in case of a conflict, do not reuse any completed job,
- # but do reuse an already-running job if available (this is the
- # original job reuse behavior, and is still the default).
- reuse_job_if_outputs_differ: false
-
- ###
- ### Federation support.
- ###
-
- # You can enable use of this cluster by users who are authenticated
- # by a remote Arvados site. Control which remote hosts are trusted
- # to authenticate which user IDs by configuring remote_hosts,
- # remote_hosts_via_dns, or both. The default configuration disables
- # remote authentication.
-
- # Map known prefixes to hosts. For example, if user IDs beginning
- # with "zzzzz-" should be authenticated by the Arvados server at
- # "zzzzz.example.com", use:
- #
- # remote_hosts:
- # zzzzz: zzzzz.example.com
- remote_hosts: {}
-
- # Use {prefix}.arvadosapi.com for any prefix not given in
- # remote_hosts above.
- remote_hosts_via_dns: false
-
- # List of cluster prefixes. These are "trusted" clusters, users
- # from the clusters listed here will be automatically setup and
- # activated. This is separate from the settings
- # auto_setup_new_users and new_users_are_active.
- auto_activate_users_from: []
-
- ###
- ### Remaining assorted configuration options.
- ###
-
- arvados_theme: default
-
- # Permit insecure (OpenSSL::SSL::VERIFY_NONE) connections to the
- # Single Sign On (sso) server and remote Arvados sites. Should only
- # be enabled during development when the SSO server is using a
- # self-signed cert.
- sso_insecure: false
-
## Set Time.zone default to the specified zone and make Active
## Record auto-convert to this zone. Run "rake -D time" for a list
## of tasks for finding time zone names. Default is UTC.
@@ -472,17 +39,6 @@ common:
# Version of your assets, change this if you want to expire all your assets
assets.version: "1.0"
- # Allow clients to create collections by providing a manifest with
- # unsigned data blob locators. IMPORTANT: This effectively disables
- # access controls for data stored in Keep: a client who knows a hash
- # can write a manifest that references the hash, pass it to
- # collections.create (which will create a permission link), use
- # collections.get to obtain a signature for that data locator, and
- # use that signed locator to retrieve the data from Keep. Therefore,
- # do not turn this on if your users expect to keep data private from
- # one another!
- permit_create_collection_with_unsigned_manifest: false
-
default_openid_prefix: https://www.google.com/accounts/o8/id
# Override the automatic version string. With the default value of
@@ -496,42 +52,6 @@ common:
# (included in vendor packages).
package_version: false
- # Default value for container_count_max for container requests. This is the
- # number of times Arvados will create a new container to satisfy a container
- # request. If a container is cancelled it will retry a new container if
- # container_count < container_count_max on any container requests associated
- # with the cancelled container.
- container_count_max: 3
-
- # Default value for keep_cache_ram of a container's runtime_constraints.
- container_default_keep_cache_ram: 268435456
-
- # Token to be included in all healthcheck requests. Disabled by default.
- # Server expects request header of the format "Authorization: Bearer xxx"
- ManagementToken: false
-
- # URL of keep-web service. Provides read/write access to collections via
- # HTTP and WebDAV protocols.
- #
- # Example:
- # keep_web_service_url: https://download.uuid_prefix.arvadosapi.com/
- keep_web_service_url: false
-
- # If true, enable collection versioning.
- # When a collection's preserve_version field is true or the current version
- # is older than the amount of seconds defined on preserve_version_if_idle,
- # a snapshot of the collection's previous state is created and linked to
- # the current collection.
- collection_versioning: false
- # 0 = auto-create a new version on every update.
- # -1 = never auto-create new versions.
- # > 0 = auto-create a new version when older than the specified number of seconds.
- preserve_version_if_idle: -1
-
- # Number of times a container can be unlocked before being
- # automatically cancelled.
- max_container_dispatch_attempts: 5
-
development:
force_ssl: false
cache_classes: false
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index f52e50089..0a99b1afc 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -20,9 +20,9 @@ EOS
# Real values will be copied from globals by omniauth_init.rb. For
# now, assign some strings so the generic *.yml config loader
# doesn't overwrite them or complain that they're missing.
- Rails.configuration.sso_app_id = 'xxx'
- Rails.configuration.sso_app_secret = 'xxx'
- Rails.configuration.sso_provider_url = '//xxx'
+ Rails.configuration.Login["ProviderAppID"] = 'xxx'
+ Rails.configuration.Login["ProviderAppSecret"] = 'xxx'
+ Rails.configuration.Services["SSO"]["ExternalURL"] = '//xxx'
WARNED_OMNIAUTH_CONFIG = true
end
@@ -39,75 +39,110 @@ $arvados_config = {}
end
end
-config_key_map =
- {
- "git_repositories_dir": "Git.Repositories",
- "disable_api_methods": "API.DisabledAPIs",
- "max_request_size": "API.MaxRequestSize",
- "max_index_database_read": "API.MaxIndexDatabaseRead",
- "max_items_per_response": "API.MaxItemsPerResponse",
- "async_permissions_update_interval": "API.AsyncPermissionsUpdateInterval",
- "auto_setup_new_users": "Users.AutoSetupNewUsers",
- "auto_setup_new_users_with_vm_uuid": "Users.AutoSetupNewUsersWithVmUUID",
- "auto_setup_new_users_with_repository": "Users.AutoSetupNewUsersWithRepository",
- "auto_setup_name_blacklist": "Users.AutoSetupUsernameBlacklist",
- "new_users_are_active": "Users.NewUsersAreActive",
- "auto_admin_user": "Users.AutoAdminUserWithEmail",
- "auto_admin_first_user": "Users.AutoAdminFirstUser",
- "user_profile_notification_address": "Users.UserProfileNotificationAddress",
- "admin_notifier_email_from": "Users.AdminNotifierEmailFrom",
- "email_subject_prefix": "Users.EmailSubjectPrefix",
- "user_notifier_email_from": "Users.UserNotifierEmailFrom",
- "new_user_notification_recipients": "Users.NewUserNotificationRecipients",
- "new_inactive_user_notification_recipients": "Users.NewInactiveUserNotificationRecipients",
- "sso_app_secret": "Login.ProviderAppSecret",
- "sso_app_id": "Login.ProviderAppID",
- "max_audit_log_age": "AuditLogs.MaxAge",
- "max_audit_log_delete_batch": "AuditLogs.MaxDeleteBatch",
- "unlogged_attributes": "AuditLogs.UnloggedAttributes",
- "max_request_log_params_size": "SystemLogs.MaxRequestLogParamsSize",
- "default_collection_replication": "Collections.DefaultReplication",
- "default_trash_lifetime": "Collections.DefaultTrashLifetime",
- "collection_versioning": "Collections.CollectionVersioning",
- "preserve_version_if_idle": "Collections.PreserveVersionIfIdle",
- "trash_sweep_interval": "Collections.TrashSweepInterval",
- "blob_signing_key": "Collections.BlobSigningKey",
- "blob_signature_ttl": "Collections.BlobSigningTTL",
- "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning", # XXX
- "docker_image_formats": "Containers.SupportedDockerImageFormats",
- "log_reuse_decisions": "Containers.LogReuseDecisions",
- "container_default_keep_cache_ram": "Containers.DefaultKeepCacheRAM",
- "max_container_dispatch_attempts": "Containers.MaxDispatchAttempts",
- "container_count_max": "Containers.MaxRetryAttempts",
- "preemptible_instances": "Containers.UsePreemptibleInstances",
- "max_compute_nodes": "Containers.MaxComputeVMs",
- "crunch_log_bytes_per_event": "Containers.Logging.LogBytesPerEvent",
- "crunch_log_seconds_between_events": "Containers.Logging.LogSecondsBetweenEvents",
- "crunch_log_throttle_period": "Containers.Logging.LogThrottlePeriod",
- "crunch_log_throttle_bytes": "Containers.Logging.LogThrottleBytes",
- "crunch_log_throttle_lines": "Containers.Logging.LogThrottleLines",
- "crunch_limit_log_bytes_per_job": "Containers.Logging.LimitLogBytesPerJob",
- "crunch_log_partial_line_throttle_period": "Containers.Logging.LogPartialLineThrottlePeriod",
- "crunch_log_update_period": "Containers.Logging.LogUpdatePeriod",
- "crunch_log_update_size": "Containers.Logging.LogUpdateSize",
- "clean_container_log_rows_after": "Containers.Logging.MaxAge",
- "dns_server_conf_dir": "Containers.SLURM.Managed.DNSServerConfDir",
- "dns_server_conf_template": "Containers.SLURM.Managed.DNSServerConfTemplate",
- "dns_server_reload_command": "Containers.SLURM.Managed.DNSServerReloadCommand",
- "dns_server_update_command": "Containers.SLURM.Managed.DNSServerUpdateCommand",
- "compute_node_domain": "Containers.SLURM.Managed.ComputeNodeDomain",
- "compute_node_nameservers": "Containers.SLURM.Managed.ComputeNodeNameservers",
- "assign_node_hostname": "Containers.SLURM.Managed.AssignNodeHostname",
- "enable_legacy_jobs_api": "Containers.JobsAPI.Enable",
- "crunch_job_wrapper": "Containers.JobsAPI.CrunchJobWrapper",
- "crunch_job_user": "Containers.JobsAPI.CrunchJobUser",
- "crunch_refresh_trigger": "Containers.JobsAPI.CrunchRefreshTrigger",
- "git_internal_dir": "Containers.JobsAPI.GitInternalDir",
- "reuse_job_if_outputs_differ": "Containers.JobsAPI.ReuseJobIfOutputsDiffer",
- "default_docker_image_for_jobs": "Containers.JobsAPI.DefaultDockerImage",
- "mailchimp_api_key": "Mail.MailchimpAPIKey",
- "mailchimp_list_id": "Mail.MailchimpListID",
-}
+def set_cfg cfg, k, v
+ # "foo.bar: baz" --> { config.foo.bar = baz }
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
+ end
+ if !cfg.nil?
+ cfg[k] = v
+ end
+end
+
+$config_migrate_map = {}
+$config_types = {}
+def declare_config(assign_to, configtype, migrate_from=nil)
+ if migrate_from
+ $config_migrate_map[migrate_from] = ->(cfg, k, v) {
+ set_cfg cfg, assign_to, v
+ }
+ end
+ $config_types[assign_to] = configtype
+end
+
+module Boolean; end
+class TrueClass; include Boolean; end
+class FalseClass; include Boolean; end
+
+declare_config "ClusterID", String, :uuid_prefix
+declare_config "Git.Repositories", String, :git_repositories_dir
+declare_config "API.DisabledAPIs", Array, :disable_api_methods
+declare_config "API.MaxRequestSize", Integer, :max_request_size
+declare_config "API.MaxIndexDatabaseRead", Integer, :max_index_database_read
+declare_config "API.MaxItemsPerResponse", Integer, :max_items_per_response
+declare_config "API.AsyncPermissionsUpdateInterval", ActiveSupport::Duration, :async_permissions_update_interval
+declare_config "Users.AutoSetupNewUsers", Boolean, :auto_setup_new_users
+declare_config "Users.AutoSetupNewUsersWithVmUUID", String, :auto_setup_new_users_with_vm_uuid
+declare_config "Users.AutoSetupNewUsersWithRepository", Boolean, :auto_setup_new_users_with_repository
+declare_config "Users.AutoSetupUsernameBlacklist", Array, :auto_setup_name_blacklist
+declare_config "Users.NewUsersAreActive", Boolean, :new_users_are_active
+declare_config "Users.AutoAdminUserWithEmail", String, :auto_admin_user
+declare_config "Users.AutoAdminFirstUser", Boolean, :auto_admin_first_user
+declare_config "Users.UserProfileNotificationAddress", String, :user_profile_notification_address
+declare_config "Users.AdminNotifierEmailFrom", String, :admin_notifier_email_from
+declare_config "Users.EmailSubjectPrefix", String, :email_subject_prefix
+declare_config "Users.UserNotifierEmailFrom", String, :user_notifier_email_from
+declare_config "Users.NewUserNotificationRecipients", Array, :new_user_notification_recipients
+declare_config "Users.NewInactiveUserNotificationRecipients", Array, :new_inactive_user_notification_recipients
+declare_config "Login.ProviderAppSecret", String, :sso_app_secret
+declare_config "Login.ProviderAppID", String, :sso_app_id
+declare_config "TLS.Insecure", Boolean, :sso_insecure
+declare_config "Services.SSO.ExternalURL", String, :sso_provider_url
+declare_config "AuditLogs.MaxAge", ActiveSupport::Duration, :max_audit_log_age
+declare_config "AuditLogs.MaxDeleteBatch", Integer, :max_audit_log_delete_batch
+declare_config "AuditLogs.UnloggedAttributes", Array, :unlogged_attributes
+declare_config "SystemLogs.MaxRequestLogParamsSize", Integer, :max_request_log_params_size
+declare_config "Collections.DefaultReplication", Integer, :default_collection_replication
+declare_config "Collections.DefaultTrashLifetime", ActiveSupport::Duration, :default_trash_lifetime
+declare_config "Collections.CollectionVersioning", Boolean, :collection_versioning
+declare_config "Collections.PreserveVersionIfIdle", ActiveSupport::Duration, :preserve_version_if_idle
+declare_config "Collections.TrashSweepInterval", ActiveSupport::Duration, :trash_sweep_interval
+declare_config "Collections.BlobSigningKey", String, :blob_signing_key
+declare_config "Collections.BlobSigningTTL", Integer, :blob_signature_ttl
+declare_config "Collections.BlobSigning", Boolean, :permit_create_collection_with_unsigned_manifest
+declare_config "Containers.SupportedDockerImageFormats", Array, :docker_image_formats
+declare_config "Containers.LogReuseDecisions", Boolean, :log_reuse_decisions
+declare_config "Containers.DefaultKeepCacheRAM", Integer, :container_default_keep_cache_ram
+declare_config "Containers.MaxDispatchAttempts", Integer, :max_container_dispatch_attempts
+declare_config "Containers.MaxRetryAttempts", Integer, :container_count_max
+declare_config "Containers.UsePreemptibleInstances", Boolean, :preemptible_instances
+declare_config "Containers.MaxComputeVMs", Integer, :max_compute_nodes
+declare_config "Containers.Logging.LogBytesPerEvent", Integer, :crunch_log_bytes_per_event
+declare_config "Containers.Logging.LogSecondsBetweenEvents", ActiveSupport::Duration, :crunch_log_seconds_between_events
+declare_config "Containers.Logging.LogThrottlePeriod", ActiveSupport::Duration, :crunch_log_throttle_period
+declare_config "Containers.Logging.LogThrottleBytes", Integer, :crunch_log_throttle_bytes
+declare_config "Containers.Logging.LogThrottleLines", Integer, :crunch_log_throttle_lines
+declare_config "Containers.Logging.LimitLogBytesPerJob", Integer, :crunch_limit_log_bytes_per_job
+declare_config "Containers.Logging.LogPartialLineThrottlePeriod", ActiveSupport::Duration, :crunch_log_partial_line_throttle_period
+declare_config "Containers.Logging.LogUpdatePeriod", ActiveSupport::Duration, :crunch_log_update_period
+declare_config "Containers.Logging.LogUpdateSize", Integer, :crunch_log_update_size
+declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
+declare_config "Containers.SLURM.Managed.DNSServerConfDir", String, :dns_server_conf_dir
+declare_config "Containers.SLURM.Managed.DNSServerConfTemplate", String, :dns_server_conf_template
+declare_config "Containers.SLURM.Managed.DNSServerReloadCommand", String, :dns_server_reload_command
+declare_config "Containers.SLURM.Managed.DNSServerUpdateCommand", String, :dns_server_update_command
+declare_config "Containers.SLURM.Managed.ComputeNodeDomain", String, :compute_node_domain
+declare_config "Containers.SLURM.Managed.ComputeNodeNameservers", Array, :compute_node_nameservers
+declare_config "Containers.SLURM.Managed.AssignNodeHostname", String, :assign_node_hostname
+declare_config "Containers.JobsAPI.Enable", String, :enable_legacy_jobs_api
+declare_config "Containers.JobsAPI.CrunchJobWrapper", String, :crunch_job_wrapper
+declare_config "Containers.JobsAPI.CrunchJobUser", String, :crunch_job_user
+declare_config "Containers.JobsAPI.CrunchRefreshTrigger", String, :crunch_refresh_trigger
+declare_config "Containers.JobsAPI.GitInternalDir", String, :git_internal_dir
+declare_config "Containers.JobsAPI.ReuseJobIfOutputsDiffer", Boolean, :reuse_job_if_outputs_differ
+declare_config "Containers.JobsAPI.DefaultDockerImage", String, :default_docker_image_for_jobs
+declare_config "Mail.MailchimpAPIKey", String, :mailchimp_api_key
+declare_config "Mail.MailchimpListID", String, :mailchimp_list_id
+declare_config "Services.Workbench1.ExternalURL", String, :workbench_address
+declare_config "Services.Websocket.ExternalURL", String, :websocket_address
+declare_config "Services.WebDAV.ExternalURL", String, :keep_web_service_url
+declare_config "Services.GitHTTP.ExternalURL", String, :git_repo_https_base
+declare_config "Services.GitSSH.ExternalURL", String, :git_repo_ssh_base
application_config = {}
%w(application.default application).each do |cfgfile|
@@ -123,13 +158,16 @@ application_config = {}
end
application_config.each do |k, v|
- cfg = $arvados_config
-
- if config_key_map[k.to_sym]
- k = config_key_map[k.to_sym]
+ if $config_migrate_map[k.to_sym]
+ $config_migrate_map[k.to_sym].call $arvados_config, k, v
+ else
+ set_cfg $arvados_config, k, v
end
+end
- # "foo.bar: baz" --> { config.foo.bar = baz }
+$config_types.each do |cfgkey, cfgtype|
+ cfg = $arvados_config
+ k = cfgkey
ks = k.split '.'
k = ks.pop
ks.each do |kk|
@@ -138,12 +176,25 @@ application_config.each do |k, v|
break
end
end
- if !cfg.nil?
- cfg[k] = v
+ if cfgtype == String and !cfg[k]
+ cfg[k] = ""
+ end
+ if cfgtype == ActiveSupport::Duration
+ if cfg[k].is_a? Integer
+ cfg[k] = cfg[k].seconds
+ elsif cfg[k].is_a? String
+ # TODO handle suffixes
+ end
end
-end
-puts $arvados_config.to_yaml
+ if cfg.nil?
+ raise "missing #{cfgkey}"
+ end
+
+ if !cfg[k].is_a? cfgtype
+ raise "#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}"
+ end
+end
Server::Application.configure do
nils = []
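To illustrate the new loader pieces in isolation: set_cfg walks a nested hash using dotted keys and silently drops keys whose parent section is missing, while declare_config records both the expected type and an optional legacy key to migrate from. A minimal usage sketch with hypothetical values:
    $arvados_config = {
      "Containers" => { "Logging" => { "MaxAge" => 0 } }
    }

    set_cfg $arvados_config, "Containers.Logging.MaxAge", 1209600
    $arvados_config["Containers"]["Logging"]["MaxAge"]   # => 1209600

    set_cfg $arvados_config, "NoSuchSection.Foo", 1      # no-op: parent section missing

    declare_config "Containers.Logging.MaxAge", ActiveSupport::Duration, :clean_container_log_rows_after
    $config_types["Containers.Logging.MaxAge"]           # => ActiveSupport::Duration
    # a legacy clean_container_log_rows_after entry in application.yml is now
    # routed through $config_migrate_map[:clean_container_log_rows_after]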
diff --git a/services/api/config/initializers/lograge.rb b/services/api/config/initializers/lograge.rb
index ef4e428bf..07dba3aef 100644
--- a/services/api/config/initializers/lograge.rb
+++ b/services/api/config/initializers/lograge.rb
@@ -38,8 +38,8 @@ Server::Application.configure do
end
params_s = SafeJSON.dump(params)
- if params_s.length > Rails.configuration.max_request_log_params_size
- payload[:params_truncated] = params_s[0..Rails.configuration.max_request_log_params_size] + "[...]"
+ if params_s.length > Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]
+ payload[:params_truncated] = params_s[0..Rails.configuration.SystemLogs["MaxRequestLogParamsSize"]] + "[...]"
else
payload[:params] = params
end
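Note that the Ruby range used for truncation is inclusive, so the truncated payload keeps limit + 1 characters before the "[...]" marker; a quick illustration with a hypothetical limit:
    limit = 10
    s = "x" * 25
    s[0..limit].length      # => 11
    s[0..limit] + "[...]"   # => "xxxxxxxxxxx[...]"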
diff --git a/services/api/config/initializers/omniauth_init.rb b/services/api/config/initializers/omniauth_init.rb
index b5e98943d..5610999a9 100644
--- a/services/api/config/initializers/omniauth_init.rb
+++ b/services/api/config/initializers/omniauth_init.rb
@@ -9,15 +9,15 @@
if defined? CUSTOM_PROVIDER_URL
Rails.logger.warn "Copying omniauth from globals in legacy config file."
- Rails.configuration.sso_app_id = APP_ID
- Rails.configuration.sso_app_secret = APP_SECRET
- Rails.configuration.sso_provider_url = CUSTOM_PROVIDER_URL
+ Rails.configuration.Login["ProviderAppID"] = APP_ID
+ Rails.configuration.Login["ProviderAppSecret"] = APP_SECRET
+ Rails.configuration.Services["SSO"]["ExternalURL"] = CUSTOM_PROVIDER_URL
else
Rails.application.config.middleware.use OmniAuth::Builder do
provider(:josh_id,
- Rails.configuration.sso_app_id,
- Rails.configuration.sso_app_secret,
- Rails.configuration.sso_provider_url)
+ Rails.configuration.Login["ProviderAppID"],
+ Rails.configuration.Login["ProviderAppSecret"],
+ Rails.configuration.Services["SSO"]["ExternalURL"])
end
OmniAuth.config.on_failure = StaticController.action(:login_failure)
end
diff --git a/services/api/lib/tasks/config_check.rake b/services/api/lib/tasks/config_check.rake
index 4f071f11a..c42c37edb 100644
--- a/services/api/lib/tasks/config_check.rake
+++ b/services/api/lib/tasks/config_check.rake
@@ -21,8 +21,8 @@ namespace :config do
end
end
# default_trash_lifetime cannot be less than 24 hours
- if Rails.configuration.default_trash_lifetime < 86400 then
- raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.default_trash_lifetime
+ if Rails.configuration.Collections["DefaultTrashLifetime"] < 86400 then
+ raise "default_trash_lifetime is %d, must be at least 86400" % Rails.configuration.Collections["DefaultTrashLifetime"]
end
end
end
commit 7baff32e82b0bfc961dc9a285da8ce187d4fe0b6
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Thu Mar 21 12:38:09 2019 -0400
13996: More config updates
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/app/models/api_client_authorization.rb b/services/api/app/models/api_client_authorization.rb
index 38538cb4f..fc8ae5282 100644
--- a/services/api/app/models/api_client_authorization.rb
+++ b/services/api/app/models/api_client_authorization.rb
@@ -94,7 +94,7 @@ class ApiClientAuthorization < ArvadosModel
def self.validate(token:, remote: nil)
return nil if !token
- remote ||= Rails.configuration.uuid_prefix
+ remote ||= Rails.configuration.ClusterID
case token[0..2]
when 'v2/'
@@ -134,7 +134,7 @@ class ApiClientAuthorization < ArvadosModel
end
uuid_prefix = uuid[0..4]
- if uuid_prefix == Rails.configuration.uuid_prefix
+ if uuid_prefix == Rails.configuration.ClusterID
# If the token were valid, we would have validated it above
return nil
elsif uuid_prefix.length != 5
@@ -153,7 +153,7 @@ class ApiClientAuthorization < ArvadosModel
# [re]validate it.
begin
clnt = HTTPClient.new
- if Rails.configuration.sso_insecure
+ if Rails.configuration.TLS["Insecure"]
clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE
else
# Use system CA certificates
@@ -164,7 +164,7 @@ class ApiClientAuthorization < ArvadosModel
end
remote_user = SafeJSON.load(
clnt.get_content('https://' + host + '/arvados/v1/users/current',
- {'remote' => Rails.configuration.uuid_prefix},
+ {'remote' => Rails.configuration.ClusterID},
{'Authorization' => 'Bearer ' + token}))
rescue => e
Rails.logger.warn "remote authentication with token #{token.inspect} failed: #{e}"
@@ -187,7 +187,7 @@ class ApiClientAuthorization < ArvadosModel
end
end
- if Rails.configuration.new_users_are_active ||
+ if Rails.configuration.Users["NewUsersAreActive"] ||
Rails.configuration.auto_activate_users_from.include?(remote_user['uuid'][0..4])
# Update is_active to whatever it is at the remote end
user.is_active = remote_user['is_active']
diff --git a/services/api/app/models/collection.rb b/services/api/app/models/collection.rb
index 590228b1a..536653fa1 100644
--- a/services/api/app/models/collection.rb
+++ b/services/api/app/models/collection.rb
@@ -125,7 +125,7 @@ class Collection < ArvadosModel
# Signature provided, but verify_signature did not like it.
logger.warn "Invalid signature on locator #{tok}"
raise ArvadosModel::PermissionDeniedError
- elsif Rails.configuration.permit_create_collection_with_unsigned_manifest
+ elsif Rails.configuration.Collections["BlobSigning"]
# No signature provided, but we are running in insecure mode.
logger.debug "Missing signature on locator #{tok} ignored"
elsif Blob.new(tok).empty?
@@ -323,9 +323,9 @@ class Collection < ArvadosModel
end
def should_preserve_version?
- return false unless (Rails.configuration.collection_versioning && versionable_updates?(self.changes.keys))
+ return false unless (Rails.configuration.Collections["CollectionVersioning"] && versionable_updates?(self.changes.keys))
- idle_threshold = Rails.configuration.preserve_version_if_idle
+ idle_threshold = Rails.configuration.Collections["PreserveVersionIfIdle"]
if !self.preserve_version_was &&
(idle_threshold < 0 ||
(idle_threshold > 0 && self.modified_at_was > db_current_time-idle_threshold.seconds))
@@ -371,7 +371,7 @@ class Collection < ArvadosModel
return manifest_text
else
token = Thread.current[:token]
- exp = [db_current_time.to_i + Rails.configuration.blob_signature_ttl,
+ exp = [db_current_time.to_i + Rails.configuration.Collections["BlobSigningTTL"],
trash_at].compact.map(&:to_i).min
self.class.sign_manifest manifest_text, token, exp
end
@@ -379,7 +379,7 @@ class Collection < ArvadosModel
def self.sign_manifest manifest, token, exp=nil
if exp.nil?
- exp = db_current_time.to_i + Rails.configuration.blob_signature_ttl
+ exp = db_current_time.to_i + Rails.configuration.Collections["BlobSigningTTL"]
end
signing_opts = {
api_token: token,
@@ -489,7 +489,7 @@ class Collection < ArvadosModel
#
# If filter_compatible_format is true (the default), only return image
# collections which are support by the installation as indicated by
- # Rails.configuration.docker_image_formats. Will follow
+ # Rails.configuration.Containers["SupportedDockerImageFormats"]. Will follow
# 'docker_image_migration' links if search_term resolves to an incompatible
# image, but an equivalent compatible image is available.
def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil, filter_compatible_format: true)
@@ -500,15 +500,17 @@ class Collection < ArvadosModel
joins("JOIN collections ON links.head_uuid = collections.uuid").
order("links.created_at DESC")
- if (Rails.configuration.docker_image_formats.include? 'v1' and
- Rails.configuration.docker_image_formats.include? 'v2') or filter_compatible_format == false
+ docker_image_formats = Rails.configuration.Containers["SupportedDockerImageFormats"]
+
+ if (docker_image_formats.include? 'v1' and
+ docker_image_formats.include? 'v2') or filter_compatible_format == false
pattern = /^(sha256:)?[0-9A-Fa-f]{64}\.tar$/
- elsif Rails.configuration.docker_image_formats.include? 'v2'
+ elsif docker_image_formats.include? 'v2'
pattern = /^(sha256:)[0-9A-Fa-f]{64}\.tar$/
- elsif Rails.configuration.docker_image_formats.include? 'v1'
+ elsif docker_image_formats.include? 'v1'
pattern = /^[0-9A-Fa-f]{64}\.tar$/
else
- raise "Unrecognized configuration for docker_image_formats #{Rails.configuration.docker_image_formats}"
+ raise "Unrecognized configuration for docker_image_formats #{docker_image_formats}"
end
# If the search term is a Collection locator that contains one file
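A condensed sketch of the locator-pattern selection above, taking the configured formats as the only input (the filter_compatible_format escape hatch is omitted here):
    def docker_tar_pattern(formats)
      if formats.include?('v1') and formats.include?('v2')
        /^(sha256:)?[0-9A-Fa-f]{64}\.tar$/   # either naming scheme
      elsif formats.include?('v2')
        /^(sha256:)[0-9A-Fa-f]{64}\.tar$/    # sha256: prefix required
      elsif formats.include?('v1')
        /^[0-9A-Fa-f]{64}\.tar$/             # bare hex digest only
      else
        raise "Unrecognized configuration for docker_image_formats #{formats}"
      end
    end

    docker_tar_pattern(["v2"]) =~ ("sha256:" + "0" * 64 + ".tar")   # => 0 (match)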
diff --git a/services/api/app/models/commit_ancestor.rb b/services/api/app/models/commit_ancestor.rb
index 3d5152c3f..60798f103 100644
--- a/services/api/app/models/commit_ancestor.rb
+++ b/services/api/app/models/commit_ancestor.rb
@@ -17,7 +17,7 @@ class CommitAncestor < ActiveRecord::Base
protected
def ask_git_whether_is
- @gitdirbase = Rails.configuration.git_repositories_dir
+ @gitdirbase = Rails.configuration.Git["Repositories"]
self.is = nil
Dir.foreach @gitdirbase do |repo|
next if repo.match(/^\./)
diff --git a/services/api/app/models/container.rb b/services/api/app/models/container.rb
index fb900a993..a95a166dd 100644
--- a/services/api/app/models/container.rb
+++ b/services/api/app/models/container.rb
@@ -205,7 +205,7 @@ class Container < ArvadosModel
rc = {}
defaults = {
'keep_cache_ram' =>
- Rails.configuration.container_default_keep_cache_ram,
+ Rails.configuration.Containers["DefaultKeepCacheRAM"],
}
defaults.merge(runtime_constraints).each do |k, v|
if v.is_a? Array
@@ -368,7 +368,7 @@ class Container < ArvadosModel
transaction do
reload(lock: 'FOR UPDATE')
check_unlock_fail
- if self.lock_count < Rails.configuration.max_container_dispatch_attempts
+ if self.lock_count < Rails.configuration.Containers["MaxDispatchAttempts"]
update_attributes!(state: Queued)
else
update_attributes!(state: Cancelled,
diff --git a/services/api/app/models/container_request.rb b/services/api/app/models/container_request.rb
index 292decafb..f2e3d3122 100644
--- a/services/api/app/models/container_request.rb
+++ b/services/api/app/models/container_request.rb
@@ -196,7 +196,7 @@ class ContainerRequest < ArvadosModel
self.mounts ||= {}
self.secret_mounts ||= {}
self.cwd ||= "."
- self.container_count_max ||= Rails.configuration.container_count_max
+ self.container_count_max ||= Rails.configuration.Containers["MaxComputeVMs"]
self.scheduling_parameters ||= {}
self.output_ttl ||= 0
self.priority ||= 0
@@ -252,7 +252,7 @@ class ContainerRequest < ArvadosModel
if self.state == Committed
# If preemptible instances (eg: AWS Spot Instances) are allowed,
# ask them on child containers by default.
- if Rails.configuration.preemptible_instances and !c.nil? and
+ if Rails.configuration.Containers["UsePreemptibleInstances"] and !c.nil? and
self.scheduling_parameters['preemptible'].nil?
self.scheduling_parameters['preemptible'] = true
end
@@ -322,7 +322,7 @@ class ContainerRequest < ArvadosModel
scheduling_parameters['partitions'].size)
errors.add :scheduling_parameters, "partitions must be an array of strings"
end
- if !Rails.configuration.preemptible_instances and scheduling_parameters['preemptible']
+ if !Rails.configuration.Containers["UsePreemptibleInstances"] and scheduling_parameters['preemptible']
errors.add :scheduling_parameters, "preemptible instances are not allowed"
end
if scheduling_parameters.include? 'max_run_time' and
diff --git a/services/api/app/models/node.rb b/services/api/app/models/node.rb
index 148dffc23..3c4712fde 100644
--- a/services/api/app/models/node.rb
+++ b/services/api/app/models/node.rb
@@ -39,7 +39,7 @@ class Node < ArvadosModel
api_accessible :superuser, :extend => :user do |t|
t.add :first_ping_at
t.add :info
- t.add lambda { |x| Rails.configuration.compute_node_nameservers }, :as => :nameservers
+ t.add lambda { |x| Rails.configuration.Containers["SLURM"]["Managed"]["ComputeNodeNameservers"] }, :as => :nameservers
end
after_initialize do
@@ -47,7 +47,7 @@ class Node < ArvadosModel
end
def domain
- super || Rails.configuration.compute_node_domain
+ super || Rails.configuration.Containers["SLURM"]["Managed"]["ComputeNodeDomain"]
end
def api_job_uuid
@@ -143,7 +143,7 @@ class Node < ArvadosModel
protected
def assign_hostname
- if self.hostname.nil? and Rails.configuration.assign_node_hostname
+ if self.hostname.nil? and Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"]
self.hostname = self.class.hostname_for_slot(self.slot_number)
end
end
@@ -159,7 +159,7 @@ class Node < ArvadosModel
# query label:
'Node.available_slot_number',
# [col_id, val] for $1 vars:
- [[nil, Rails.configuration.max_compute_nodes]],
+ [[nil, Rails.configuration.Containers["MaxComputeVMs"]]],
).rows.first.andand.first
end
@@ -194,24 +194,24 @@ class Node < ArvadosModel
template_vars = {
hostname: hostname,
- uuid_prefix: Rails.configuration.uuid_prefix,
+ uuid_prefix: Rails.configuration.ClusterID,
ip_address: ip_address,
ptr_domain: ptr_domain,
}
- if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template
+ if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]
tmpfile = nil
begin
begin
- template = IO.read(Rails.configuration.dns_server_conf_template)
+ template = IO.read(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"])
rescue IOError, SystemCallError => e
- logger.error "Reading #{Rails.configuration.dns_server_conf_template}: #{e.message}"
+ logger.error "Reading #{Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"]}: #{e.message}"
raise
end
- hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+ hostfile = File.join Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], "#{hostname}.conf"
Tempfile.open(["#{hostname}-", ".conf.tmp"],
- Rails.configuration.dns_server_conf_dir) do |f|
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"]) do |f|
tmpfile = f.path
f.puts template % template_vars
end
@@ -227,20 +227,20 @@ class Node < ArvadosModel
end
end
- if Rails.configuration.dns_server_update_command
- cmd = Rails.configuration.dns_server_update_command % template_vars
+ if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"]
+ cmd = Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerUpdateCommand"] % template_vars
if not system cmd
logger.error "dns_server_update_command #{cmd.inspect} failed: #{$?}"
ok = false
end
end
- if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_reload_command
- restartfile = File.join(Rails.configuration.dns_server_conf_dir, 'restart.txt')
+ if Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"]
+ restartfile = File.join(Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], 'restart.txt')
begin
File.open(restartfile, 'w') do |f|
# Typically, this is used to trigger a dns server restart
- f.puts Rails.configuration.dns_server_reload_command
+ f.puts Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerReloadCommand"]
end
rescue IOError, SystemCallError => e
logger.error "Unable to write #{restartfile}: #{e.message}"
@@ -252,7 +252,7 @@ class Node < ArvadosModel
end
def self.hostname_for_slot(slot_number)
- config = Rails.configuration.assign_node_hostname
+ config = Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"]
return nil if !config
@@ -261,10 +261,13 @@ class Node < ArvadosModel
# At startup, make sure all DNS entries exist. Otherwise, slurmctld
# will refuse to start.
- if Rails.configuration.dns_server_conf_dir and Rails.configuration.dns_server_conf_template and Rails.configuration.assign_node_hostname
- (0..Rails.configuration.max_compute_nodes-1).each do |slot_number|
+ if (Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"] and
+ Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfTemplate"] and
+ Rails.configuration.Containers["SLURM"]["Managed"]["AssignNodeHostname"])
+
+ (0..Rails.configuration.Containers["MaxComputeVMs"]-1).each do |slot_number|
hostname = hostname_for_slot(slot_number)
- hostfile = File.join Rails.configuration.dns_server_conf_dir, "#{hostname}.conf"
+ hostfile = File.join Rails.configuration.Containers["SLURM"]["Managed"]["DNSServerConfDir"], "#{hostname}.conf"
if !File.exist? hostfile
n = Node.where(:slot_number => slot_number).first
if n.nil? or n.ip_address.nil?
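A runnable sketch of the %-style template expansion behind the DNS hunks above; the template text, directory, and values are hypothetical:

require 'tmpdir'

template_vars = {
  hostname:    "compute0",
  uuid_prefix: "zzzzz",
  ip_address:  "10.0.0.7",
  ptr_domain:  "0.0.10.in-addr.arpa",
}
template = "local-data: \"%<hostname>s.example.com. IN A %<ip_address>s\"\n"
conf_dir = Dir.mktmpdir
hostfile = File.join(conf_dir, "#{template_vars[:hostname]}.conf")
File.write(hostfile, template % template_vars)
puts File.read(hostfile)   # => local-data: "compute0.example.com. IN A 10.0.0.7"
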
diff --git a/services/api/app/views/admin_notifier/new_user.text.erb b/services/api/app/views/admin_notifier/new_user.text.erb
index d21513f7f..20a36afcb 100644
--- a/services/api/app/views/admin_notifier/new_user.text.erb
+++ b/services/api/app/views/admin_notifier/new_user.text.erb
@@ -4,7 +4,7 @@ SPDX-License-Identifier: AGPL-3.0 %>
<%
add_to_message = ''
- if Rails.configuration.auto_setup_new_users
+ if Rails.configuration.Users["AutoSetupNewUsers"]
add_to_message = @user.is_invited ? ' and setup' : ', but not setup'
end
%>
@@ -22,4 +22,3 @@ Please see workbench for more information:
<% end -%>
Thanks,
Your friendly Arvados robot.
-
diff --git a/services/api/lib/audit_logs.rb b/services/api/lib/audit_logs.rb
index 56fd935f3..4116ae0df 100644
--- a/services/api/lib/audit_logs.rb
+++ b/services/api/lib/audit_logs.rb
@@ -44,8 +44,8 @@ module AuditLogs
end
def self.tidy_in_background
- max_age = Rails.configuration.max_audit_log_age
- max_batch = Rails.configuration.max_audit_log_delete_batch
+ max_age = Rails.configuration.AuditLogs["MaxAge"]
+ max_batch = Rails.configuration.AuditLogs["MaxDeleteBatch"]
return if max_age <= 0 || max_batch <= 0
exp = (max_age/14).seconds
diff --git a/services/api/lib/crunch_dispatch.rb b/services/api/lib/crunch_dispatch.rb
index 449d7d516..eceada5a7 100644
--- a/services/api/lib/crunch_dispatch.rb
+++ b/services/api/lib/crunch_dispatch.rb
@@ -31,13 +31,13 @@ class CrunchDispatch
@cgroup_root = ENV['CRUNCH_CGROUP_ROOT']
@srun_sync_timeout = ENV['CRUNCH_SRUN_SYNC_TIMEOUT']
- @arvados_internal = Rails.configuration.git_internal_dir
+ @arvados_internal = Rails.configuration.Containers["JobsAPI"]["GitInternalDir"]
if not File.exist? @arvados_internal
$stderr.puts `mkdir -p #{@arvados_internal.shellescape} && git init --bare #{@arvados_internal.shellescape}`
raise "No internal git repository available" unless ($? == 0)
end
- @repo_root = Rails.configuration.git_repositories_dir
+ @repo_root = Rails.configuration.Git["Repositories"]
@arvados_repo_path = Repository.where(name: "arvados").first.server_path
@authorizations = {}
@did_recently = {}
@@ -460,7 +460,7 @@ class CrunchDispatch
bytes_logged: 0,
events_logged: 0,
log_throttle_is_open: true,
- log_throttle_reset_time: Time.now + Rails.configuration.crunch_log_throttle_period,
+ log_throttle_reset_time: Time.now + Rails.configuration.Containers["Logging"]["LogThrottlePeriod"],
log_throttle_bytes_so_far: 0,
log_throttle_lines_so_far: 0,
log_throttle_bytes_skipped: 0,
@@ -485,7 +485,7 @@ class CrunchDispatch
matches = line.match(/^\S+ \S+ \d+ \d+ stderr (.*)/)
if matches and matches[1] and matches[1].start_with?('[...]') and matches[1].end_with?('[...]')
partial_line = true
- if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.crunch_log_partial_line_throttle_period
+ if Time.now > running_job[:log_throttle_partial_line_last_at] + Rails.configuration.Containers["Logging"]["LogPartialLineThrottlePeriod"]
running_job[:log_throttle_partial_line_last_at] = Time.now
else
skip_counts = true
@@ -499,26 +499,26 @@ class CrunchDispatch
end
if (running_job[:bytes_logged] >
- Rails.configuration.crunch_limit_log_bytes_per_job)
- message = "Exceeded log limit #{Rails.configuration.crunch_limit_log_bytes_per_job} bytes (crunch_limit_log_bytes_per_job). Log will be truncated."
+ Rails.configuration.Containers["Logging"]["LimitLogBytesPerJob"])
+ message = "Exceeded log limit #{Rails.configuration.Containers["Logging"]["LimitLogBytesPerJob"]} bytes (LimitLogBytesPerJob). Log will be truncated."
running_job[:log_throttle_reset_time] = Time.now + 100.years
running_job[:log_throttle_is_open] = false
elsif (running_job[:log_throttle_bytes_so_far] >
- Rails.configuration.crunch_log_throttle_bytes)
+ Rails.configuration.Containers["Logging"]["LogThrottleBytes"])
remaining_time = running_job[:log_throttle_reset_time] - Time.now
- message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_bytes} bytes per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_bytes). Logging will be silenced for the next #{remaining_time.round} seconds."
+ message = "Exceeded rate #{Rails.configuration.Containers["Logging"]["LogThrottleBytes"]} bytes per #{Rails.configuration.Containers["Logging"]["LogThrottlePeriod"]} seconds (LogThrottleBytes). Logging will be silenced for the next #{remaining_time.round} seconds."
running_job[:log_throttle_is_open] = false
elsif (running_job[:log_throttle_lines_so_far] >
- Rails.configuration.crunch_log_throttle_lines)
+ Rails.configuration.Containers["Logging"]["LogThrottleLines"])
remaining_time = running_job[:log_throttle_reset_time] - Time.now
- message = "Exceeded rate #{Rails.configuration.crunch_log_throttle_lines} lines per #{Rails.configuration.crunch_log_throttle_period} seconds (crunch_log_throttle_lines), logging will be silenced for the next #{remaining_time.round} seconds."
+ message = "Exceeded rate #{Rails.configuration.Containers["Logging"]["LogThrottleLines"]} lines per #{Rails.configuration.Containers["Logging"]["LogThrottlePeriod"]} seconds (LogThrottleLines), logging will be silenced for the next #{remaining_time.round} seconds."
running_job[:log_throttle_is_open] = false
elsif partial_line and running_job[:log_throttle_first_partial_line]
running_job[:log_throttle_first_partial_line] = false
- message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.crunch_log_partial_line_throttle_period} seconds."
+ message = "Rate-limiting partial segments of long lines to one every #{Rails.configuration.Containers["Logging"]["LogPartialLineThrottlePeriod"]} seconds."
end
end
@@ -552,7 +552,7 @@ class CrunchDispatch
j[:stderr_buf_to_flush] << "#{LogTime.now} #{message}\n"
end
- j[:log_throttle_reset_time] = now + Rails.configuration.crunch_log_throttle_period
+ j[:log_throttle_reset_time] = now + Rails.configuration.Containers["Logging"]["LogThrottlePeriod"]
j[:log_throttle_bytes_so_far] = 0
j[:log_throttle_lines_so_far] = 0
j[:log_throttle_bytes_skipped] = 0
@@ -592,7 +592,7 @@ class CrunchDispatch
bufend = ''
streambuf.each_line do |line|
if not line.end_with? $/
- if line.size > Rails.configuration.crunch_log_throttle_bytes
+ if line.size > Rails.configuration.Containers["Logging"]["LogThrottleBytes"]
# Without a limit here, we'll use 2x an arbitrary amount
# of memory, and waste a lot of time copying strings
# around, all without providing any feedback to anyone
@@ -775,7 +775,7 @@ class CrunchDispatch
# This is how crunch-job child procs know where the "refresh"
# trigger file is
- ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.crunch_refresh_trigger
+ ENV["CRUNCH_REFRESH_TRIGGER"] = Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"]
# If salloc can't allocate resources immediately, make it use our
# temporary failure exit code. This ensures crunch-dispatch won't
@@ -937,8 +937,8 @@ class CrunchDispatch
# Send out to log event if buffer size exceeds the bytes per event or if
# it has been at least crunch_log_seconds_between_events seconds since
# the last flush.
- if running_job[:stderr_buf_to_flush].size > Rails.configuration.crunch_log_bytes_per_event or
- (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.crunch_log_seconds_between_events
+ if running_job[:stderr_buf_to_flush].size > Rails.configuration.Containers["Logging"]["LogBytesPerEvent"] or
+ (Time.now - running_job[:stderr_flushed_at]) >= Rails.configuration.Containers["Logging"]["LogSecondsBetweenEvents"]
begin
log = Log.new(object_uuid: running_job[:job].uuid,
event_type: 'stderr',
@@ -957,7 +957,7 @@ class CrunchDispatch
# An array of job_uuids in squeue
def squeue_jobs
- if Rails.configuration.crunch_job_wrapper == :slurm_immediate
+ if Rails.configuration.Containers["JobsAPI"]["CrunchJobWrapper"].to_sym == :slurm_immediate
p = IO.popen(['squeue', '-a', '-h', '-o', '%j'])
begin
p.readlines.map {|line| line.strip}
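A self-contained sketch of the byte/line throttle window that the renamed Containers.Logging settings drive above; the limits and state layout are illustrative, not CrunchDispatch's internals:

LIMITS = { bytes: 65536, lines: 1024, period: 60 }   # LogThrottleBytes/Lines/Period

def log_allowed?(state, line, now: Time.now)
  if now > state[:reset_time]                        # start a new throttle window
    state[:reset_time] = now + LIMITS[:period]
    state[:bytes] = 0
    state[:lines] = 0
  end
  state[:bytes] += line.bytesize
  state[:lines] += 1
  state[:bytes] <= LIMITS[:bytes] && state[:lines] <= LIMITS[:lines]
end

state = { reset_time: Time.now + LIMITS[:period], bytes: 0, lines: 0 }
p log_allowed?(state, "normal stderr line\n")   # => true
p log_allowed?(state, "x" * 70_000)             # => false (byte budget exceeded)
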
diff --git a/services/api/lib/enable_jobs_api.rb b/services/api/lib/enable_jobs_api.rb
index 63543ab3a..4da6188d1 100644
--- a/services/api/lib/enable_jobs_api.rb
+++ b/services/api/lib/enable_jobs_api.rb
@@ -30,10 +30,9 @@ Disable_jobs_api_method_list = ["jobs.create",
"jobs.show",
"job_tasks.show"]
def check_enable_legacy_jobs_api
- if Rails.configuration.enable_legacy_jobs_api == false ||
- (Rails.configuration.enable_legacy_jobs_api == "auto" &&
+ if Rails.configuration.Containers["JobsAPI"]["Enable"] == false ||
+ (Rails.configuration.Containers["JobsAPI"]["Enable"] == "auto" &&
Job.count == 0)
- Rails.configuration.disable_api_methods += Disable_jobs_api_method_list
+ Rails.configuration.API["DisabledAPIs"] += Disable_jobs_api_method_list
end
end
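A sketch of the Containers.JobsAPI.Enable decision in the hunk above: "auto" keeps the legacy API only when job records already exist. Here job_count stands in for Job.count:

def jobs_api_disabled?(enable_setting, job_count)
  enable_setting == false || (enable_setting == "auto" && job_count == 0)
end

p jobs_api_disabled?("auto", 0)    # => true   (no legacy jobs; hide the API)
p jobs_api_disabled?("auto", 12)   # => false  (keep serving existing records)
p jobs_api_disabled?(false, 12)    # => true   (explicitly disabled)
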
diff --git a/services/api/lib/josh_id.rb b/services/api/lib/josh_id.rb
index bb6c1f48a..396d72444 100644
--- a/services/api/lib/josh_id.rb
+++ b/services/api/lib/josh_id.rb
@@ -40,7 +40,7 @@ module OmniAuth
options.client_options[:site] = options[:custom_provider_url]
options.client_options[:authorize_url] = "#{options[:custom_provider_url]}/auth/josh_id/authorize"
options.client_options[:access_token_url] = "#{options[:custom_provider_url]}/auth/josh_id/access_token"
- if Rails.configuration.sso_insecure
+ if Rails.configuration.TLS["Insecure"]
options.client_options[:ssl] = {verify_mode: OpenSSL::SSL::VERIFY_NONE}
end
::OAuth2::Client.new(options.client_id, options.client_secret, deep_symbolize(options.client_options))
diff --git a/services/api/lib/load_param.rb b/services/api/lib/load_param.rb
index 736f270e9..0cc29ded0 100644
--- a/services/api/lib/load_param.rb
+++ b/services/api/lib/load_param.rb
@@ -56,7 +56,7 @@ module LoadParam
raise ArgumentError.new("Invalid value for limit parameter")
end
@limit = [params[:limit].to_i,
- Rails.configuration.max_items_per_response].min
+ Rails.configuration.API["MaxItemsPerResponse"]].min
else
@limit = DEFAULT_LIMIT
end
diff --git a/services/api/lib/log_reuse_info.rb b/services/api/lib/log_reuse_info.rb
index ed5cc82bf..01cf6dd78 100644
--- a/services/api/lib/log_reuse_info.rb
+++ b/services/api/lib/log_reuse_info.rb
@@ -9,7 +9,7 @@ module LogReuseInfo
# doing expensive things like database queries, and we want to skip
# those when logging is disabled.
def log_reuse_info(candidates=nil)
- if Rails.configuration.log_reuse_decisions
+ if Rails.configuration.Containers["LogReuseDecisions"]
msg = yield
if !candidates.nil?
msg = "have #{candidates.count} candidates " + msg
diff --git a/services/api/lib/refresh_permission_view.rb b/services/api/lib/refresh_permission_view.rb
index 25be3c08d..e7fa263c7 100644
--- a/services/api/lib/refresh_permission_view.rb
+++ b/services/api/lib/refresh_permission_view.rb
@@ -12,8 +12,8 @@ def do_refresh_permission_view
end
def refresh_permission_view(async=false)
- if async and Rails.configuration.async_permissions_update_interval > 0
- exp = Rails.configuration.async_permissions_update_interval.seconds
+ if async and Rails.configuration.API["AsyncPermissionsUpdateInterval"] > 0
+ exp = Rails.configuration.API["AsyncPermissionsUpdateInterval"].seconds
need = false
Rails.cache.fetch('AsyncRefreshPermissionView', expires_in: exp) do
need = true
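A minimal sketch of the "at most once per AsyncPermissionsUpdateInterval" pattern above; a plain hash stands in for Rails.cache, and the key and interval are illustrative:

CACHE = {}

def schedule_once(key, interval_seconds, now: Time.now)
  return false if CACHE[key] && CACHE[key] > now   # still inside the interval
  CACHE[key] = now + interval_seconds              # earliest time of the next run
  yield
  true
end

schedule_once('AsyncRefreshPermissionView', 20) { puts "refreshing permission view" }
schedule_once('AsyncRefreshPermissionView', 20) { puts "never printed" }
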
diff --git a/services/api/lib/sweep_trashed_objects.rb b/services/api/lib/sweep_trashed_objects.rb
index bedbd68a4..6ade1fc56 100644
--- a/services/api/lib/sweep_trashed_objects.rb
+++ b/services/api/lib/sweep_trashed_objects.rb
@@ -55,8 +55,8 @@ module SweepTrashedObjects
end
def self.sweep_if_stale
- return if Rails.configuration.trash_sweep_interval <= 0
- exp = Rails.configuration.trash_sweep_interval.seconds
+ return if Rails.configuration.Collections["TrashSweepInterval"] <= 0
+ exp = Rails.configuration.Collections["TrashSweepInterval"].seconds
need = false
Rails.cache.fetch('SweepTrashedObjects', expires_in: exp) do
need = true
diff --git a/services/api/lib/tasks/delete_old_container_logs.rake b/services/api/lib/tasks/delete_old_container_logs.rake
index b45113e8a..c926c2ae0 100644
--- a/services/api/lib/tasks/delete_old_container_logs.rake
+++ b/services/api/lib/tasks/delete_old_container_logs.rake
@@ -11,7 +11,7 @@ namespace :db do
desc "Remove old container log entries from the logs table"
task delete_old_container_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.clean_container_log_rows_after} seconds')"
+ delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN containers ON logs.object_uuid = containers.uuid WHERE event_type IN ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat') AND containers.log IS NOT NULL AND clock_timestamp() - containers.finished_at > interval '#{Rails.configuration.Containers["Logging"]["MaxAge"]} seconds')"
ActiveRecord::Base.connection.execute(delete_sql)
end
diff --git a/services/api/lib/tasks/delete_old_job_logs.rake b/services/api/lib/tasks/delete_old_job_logs.rake
index dcd92b19b..327f663b2 100644
--- a/services/api/lib/tasks/delete_old_job_logs.rake
+++ b/services/api/lib/tasks/delete_old_job_logs.rake
@@ -9,7 +9,7 @@
namespace :db do
desc "Remove old job stderr entries from the logs table"
task delete_old_job_logs: :environment do
- delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.clean_job_log_rows_after} seconds')"
+ delete_sql = "DELETE FROM logs WHERE id in (SELECT logs.id FROM logs JOIN jobs ON logs.object_uuid = jobs.uuid WHERE event_type = 'stderr' AND jobs.log IS NOT NULL AND clock_timestamp() - jobs.finished_at > interval '#{Rails.configuration.Rails.configuration.Containers["Logging"]["MaxAge"]} seconds')"
ActiveRecord::Base.connection.execute(delete_sql)
end
diff --git a/services/api/lib/trashable.rb b/services/api/lib/trashable.rb
index 968796296..4981b8cd3 100644
--- a/services/api/lib/trashable.rb
+++ b/services/api/lib/trashable.rb
@@ -50,7 +50,7 @@ module Trashable
if trash_at.nil?
self.delete_at = nil
else
- self.delete_at = trash_at + Rails.configuration.default_trash_lifetime.seconds
+ self.delete_at = trash_at + Rails.configuration.Collections["DefaultTrashLifetime"].seconds
end
elsif !trash_at || !delete_at || trash_at > delete_at
# Not trash, or bogus arguments? Just validate in
@@ -65,7 +65,7 @@ module Trashable
earliest_delete = [
@validation_timestamp,
trash_at_was,
- ].compact.min + Rails.configuration.blob_signature_ttl.seconds
+ ].compact.min + Rails.configuration.Collections["BlobSigningTTL"].seconds
# The previous value of delete_at is also an upper bound on the
# longest-lived permission token. For example, if TTL=14,
@@ -96,7 +96,7 @@ module TrashableController
@object.update_attributes!(trash_at: db_current_time)
end
earliest_delete = (@object.trash_at +
- Rails.configuration.blob_signature_ttl.seconds)
+ Rails.configuration.Collections["BlobSigningTTL"].seconds)
if @object.delete_at > earliest_delete
@object.update_attributes!(delete_at: earliest_delete)
end
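A runnable sketch of the trash/delete timing rules above, using the renamed Collections settings; the timestamps are illustrative:

require 'time'

default_trash_lifetime = 1209600   # Collections.DefaultTrashLifetime, seconds
blob_signing_ttl       = 1209600   # Collections.BlobSigningTTL, seconds

trash_at  = Time.parse("2019-04-08T00:00:00Z")
delete_at = trash_at + default_trash_lifetime

# delete_at must leave time for any already-issued blob signatures to expire:
earliest_delete = trash_at + blob_signing_ttl
delete_at = earliest_delete if delete_at < earliest_delete
puts delete_at.utc.iso8601         # => 2019-04-22T00:00:00Z
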
commit 0195e1bbf4c1e5810f637212e9605d2d2dc03e7e
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Wed Mar 20 17:33:51 2019 -0400
13996: Updating API server to use new config object WIP
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
index ba9f7524e..53fc5d9cb 100644
--- a/lib/config/config.defaults.yml
+++ b/lib/config/config.defaults.yml
@@ -329,42 +329,49 @@ Clusters:
# (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
AssignNodeHostname: compute%<slot_number>d
- JobsAPI:
- # Enable the legacy Jobs API.
- # auto -- (default) enable the Jobs API only if it has been used before
- # (i.e., there are job records in the database)
- # true -- enable the Jobs API despite lack of existing records.
- # false -- disable the Jobs API despite presence of existing records.
- Enable: auto
-
- # Git repositories must be readable by api server, or you won't be
- # able to submit crunch jobs. To pass the test suites, put a clone
- # of the arvados tree in {git_repositories_dir}/arvados.git or
- # {git_repositories_dir}/arvados/.git
- GitInternalDir: /var/lib/arvados/internal.git
-
- # Docker image to be used when none found in runtime_constraints of a job
- DefaultDockerImage: ""
-
- # :none or :slurm_immediate
- CrunchJobWrapper: :none
-
- # username, or false = do not set uid when running jobs.
- CrunchJobUser: crunch
-
- # The web service must be able to create/write this file, and
- # crunch-job must be able to stat() it.
- CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
-
- # Control job reuse behavior when two completed jobs match the
- # search criteria and have different outputs.
- #
- # If true, in case of a conflict, reuse the earliest job (this is
- # similar to container reuse behavior).
- #
- # If false, in case of a conflict, do not reuse any completed job,
- # but do reuse an already-running job if available (this is the
- # original job reuse behavior, and is still the default).
- ReuseJobIfOutputsDiffer: false
-
- Mail: {}
+ JobsAPI:
+ # Enable the legacy Jobs API.
+ # auto -- (default) enable the Jobs API only if it has been used before
+ # (i.e., there are job records in the database)
+ # true -- enable the Jobs API despite lack of existing records.
+ # false -- disable the Jobs API despite presence of existing records.
+ Enable: auto
+
+ # Git repositories must be readable by api server, or you won't be
+ # able to submit crunch jobs. To pass the test suites, put a clone
+ # of the arvados tree in {git_repositories_dir}/arvados.git or
+ # {git_repositories_dir}/arvados/.git
+ GitInternalDir: /var/lib/arvados/internal.git
+
+ # Docker image to be used when none found in runtime_constraints of a job
+ DefaultDockerImage: ""
+
+ # :none or :slurm_immediate
+ CrunchJobWrapper: :none
+
+ # username, or false = do not set uid when running jobs.
+ CrunchJobUser: crunch
+
+ # The web service must be able to create/write this file, and
+ # crunch-job must be able to stat() it.
+ CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+ # Control job reuse behavior when two completed jobs match the
+ # search criteria and have different outputs.
+ #
+ # If true, in case of a conflict, reuse the earliest job (this is
+ # similar to container reuse behavior).
+ #
+ # If false, in case of a conflict, do not reuse any completed job,
+ # but do reuse an already-running job if available (this is the
+ # original job reuse behavior, and is still the default).
+ ReuseJobIfOutputsDiffer: false
+
+ Mail:
+ MailchimpAPIKey: # api-server/mailchimp_api_key
+ MailchimpListID: # api-server/mailchimp_list_id
+ SendUserSetupNotificationEmail: # workbench/send_user_setup_notification_email
+ IssueReporterEmailFrom: # workbench/issue_reporter_email_from
+ IssueReporterEmailTo: # workbench/issue_reporter_email_to
+ SupportEmailAddress: # workbench/support_email_address
+ EmailFrom: # workbench/email_from
diff --git a/services/api/app/controllers/application_controller.rb b/services/api/app/controllers/application_controller.rb
index b613e97a1..78fea32b2 100644
--- a/services/api/app/controllers/application_controller.rb
+++ b/services/api/app/controllers/application_controller.rb
@@ -306,7 +306,7 @@ class ApplicationController < ActionController::Base
limit_query.each do |record|
new_limit += 1
read_total += record.read_length.to_i
- if read_total >= Rails.configuration.max_index_database_read
+ if read_total >= Rails.configuration.API["MaxIndexDatabaseRead"]
new_limit -= 1 if new_limit > 1
@limit = new_limit
break
@@ -419,7 +419,7 @@ class ApplicationController < ActionController::Base
end
def disable_api_methods
- if Rails.configuration.disable_api_methods.
+ if Rails.configuration.API["DisabledAPIs"].
include?(controller_name + "." + action_name)
send_error("Disabled", status: 404)
end
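A sketch of the API.MaxIndexDatabaseRead trimming loop above: stop growing the page once the cumulative row size crosses the limit, but never return zero rows. The row sizes and the limit are illustrative:

def clamp_limit(row_read_lengths, max_index_database_read)
  new_limit, read_total = 0, 0
  row_read_lengths.each do |len|
    new_limit += 1
    read_total += len
    if read_total >= max_index_database_read
      new_limit -= 1 if new_limit > 1
      break
    end
  end
  new_limit
end

p clamp_limit([10, 20, 500, 40], 100)   # => 2
p clamp_limit([900], 100)               # => 1 (never trimmed to zero)
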
diff --git a/services/api/app/controllers/arvados/v1/groups_controller.rb b/services/api/app/controllers/arvados/v1/groups_controller.rb
index f7db1ef12..825f9950d 100644
--- a/services/api/app/controllers/arvados/v1/groups_controller.rb
+++ b/services/api/app/controllers/arvados/v1/groups_controller.rb
@@ -191,7 +191,7 @@ class Arvados::V1::GroupsController < ApplicationController
table_names = Hash[klasses.collect { |k| [k, k.table_name] }]
- disabled_methods = Rails.configuration.disable_api_methods
+ disabled_methods = Rails.configuration.API["DisabledAPIs"]
avail_klasses = table_names.select{|k, t| !disabled_methods.include?(t+'.index')}
klasses = avail_klasses.keys
diff --git a/services/api/app/controllers/arvados/v1/schema_controller.rb b/services/api/app/controllers/arvados/v1/schema_controller.rb
index 8ff2a97c4..95e10498b 100644
--- a/services/api/app/controllers/arvados/v1/schema_controller.rb
+++ b/services/api/app/controllers/arvados/v1/schema_controller.rb
@@ -39,7 +39,7 @@ class Arvados::V1::SchemaController < ApplicationController
title: "Arvados API",
description: "The API to interact with Arvados.",
documentationLink: "http://doc.arvados.org/api/index.html",
- defaultCollectionReplication: Rails.configuration.default_collection_replication,
+ defaultCollectionReplication: Rails.configuration.Collections["DefaultReplication"],
protocol: "rest",
baseUrl: root_url + "arvados/v1/",
basePath: "/arvados/v1/",
@@ -70,7 +70,7 @@ class Arvados::V1::SchemaController < ApplicationController
when false
''
when true
- 'https://git.%s.arvadosapi.com/' % Rails.configuration.uuid_prefix
+ 'https://git.%s.arvadosapi.com/' % Rails.configuration.ClusterID
else
Rails.application.config.git_repo_https_base
end,
@@ -405,7 +405,7 @@ class Arvados::V1::SchemaController < ApplicationController
end
end
end
- Rails.configuration.disable_api_methods.each do |method|
+ Rails.configuration.API["DisabledAPIs"].each do |method|
ctrl, action = method.split('.', 2)
discovery[:resources][ctrl][:methods].delete(action.to_sym)
end
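A sketch of the API.DisabledAPIs pruning above: each "controller.action" entry is removed from the discovery document. The document and the disabled list here are illustrative:

discovery = { resources: { "jobs" => { methods: { create: {}, list: {} } } } }
disabled_apis = ["jobs.create"]

disabled_apis.each do |method|
  ctrl, action = method.split('.', 2)
  discovery[:resources][ctrl][:methods].delete(action.to_sym)
end
p discovery   # => {:resources=>{"jobs"=>{:methods=>{:list=>{}}}}}
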
diff --git a/services/api/app/controllers/user_sessions_controller.rb b/services/api/app/controllers/user_sessions_controller.rb
index 237156f11..c5d3ae74f 100644
--- a/services/api/app/controllers/user_sessions_controller.rb
+++ b/services/api/app/controllers/user_sessions_controller.rb
@@ -52,7 +52,7 @@ class UserSessionsController < ApplicationController
:first_name => omniauth['info']['first_name'],
:last_name => omniauth['info']['last_name'],
:identity_url => omniauth['info']['identity_url'],
- :is_active => Rails.configuration.new_users_are_active,
+ :is_active => Rails.configuration.Users["NewUsersAreActive"],
:owner_uuid => system_user_uuid)
if omniauth['info']['username']
user.set_initial_username(requested: omniauth['info']['username'])
diff --git a/services/api/app/mailers/admin_notifier.rb b/services/api/app/mailers/admin_notifier.rb
index 87a5699f4..e454d64e0 100644
--- a/services/api/app/mailers/admin_notifier.rb
+++ b/services/api/app/mailers/admin_notifier.rb
@@ -5,32 +5,32 @@
class AdminNotifier < ActionMailer::Base
include AbstractController::Callbacks
- default from: Rails.configuration.admin_notifier_email_from
+ default from: Rails.configuration.Users["AdminNotifierEmailFrom"]
def new_user(user)
@user = user
- if not Rails.configuration.new_user_notification_recipients.empty? then
- @recipients = Rails.configuration.new_user_notification_recipients
+ if not Rails.configuration.Users["NewUserNotificationRecipients"].empty? then
+ @recipients = Rails.configuration.Users["NewUserNotificationRecipients"]
logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
add_to_subject = ''
- if Rails.configuration.auto_setup_new_users
+ if Rails.configuration.Users["AutoSetupNewUsers"]
add_to_subject = @user.is_invited ? ' and setup' : ', but not setup'
end
mail(to: @recipients,
- subject: "#{Rails.configuration.email_subject_prefix}New user created#{add_to_subject} notification"
+ subject: "#{Rails.configuration.Users["EmailSubjectPrefix"]}New user created#{add_to_subject} notification"
)
end
end
def new_inactive_user(user)
@user = user
- if not Rails.configuration.new_inactive_user_notification_recipients.empty? then
- @recipients = Rails.configuration.new_inactive_user_notification_recipients
+ if not Rails.configuration.Users["NewInactiveUserNotificationRecipients"].empty? then
+ @recipients = Rails.configuration.Users["NewInactiveUserNotificationRecipients"]
logger.info "Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)"
mail(to: @recipients,
- subject: "#{Rails.configuration.email_subject_prefix}New inactive user notification"
+ subject: "#{Rails.configuration.Users["EmailSubjectPrefix"]}New inactive user notification"
)
end
end
diff --git a/services/api/app/mailers/profile_notifier.rb b/services/api/app/mailers/profile_notifier.rb
index 8c0c5ec86..a23d5f345 100644
--- a/services/api/app/mailers/profile_notifier.rb
+++ b/services/api/app/mailers/profile_notifier.rb
@@ -3,7 +3,7 @@
# SPDX-License-Identifier: AGPL-3.0
class ProfileNotifier < ActionMailer::Base
- default from: Rails.configuration.admin_notifier_email_from
+ default from: Rails.configuration.Users["AdminNotifierEmailFrom"]
def profile_created(user, address)
@user = user
diff --git a/services/api/app/mailers/user_notifier.rb b/services/api/app/mailers/user_notifier.rb
index 5fb7036bf..dbde7a973 100644
--- a/services/api/app/mailers/user_notifier.rb
+++ b/services/api/app/mailers/user_notifier.rb
@@ -5,7 +5,7 @@
class UserNotifier < ActionMailer::Base
include AbstractController::Callbacks
- default from: Rails.configuration.user_notifier_email_from
+ default from: Rails.configuration.Users["UserNotifierEmailFrom"]
def account_is_setup(user)
@user = user
diff --git a/services/api/app/models/arvados_model.rb b/services/api/app/models/arvados_model.rb
index e619abe8c..00f60e6fb 100644
--- a/services/api/app/models/arvados_model.rb
+++ b/services/api/app/models/arvados_model.rb
@@ -411,7 +411,7 @@ class ArvadosModel < ApplicationRecord
end
def logged_attributes
- attributes.except(*Rails.configuration.unlogged_attributes)
+ attributes.except(*Rails.configuration.AuditLogs["UnloggedAttributes"])
end
def self.full_text_searchable_columns
@@ -735,7 +735,7 @@ class ArvadosModel < ApplicationRecord
end
def self.uuid_like_pattern
- "#{Rails.configuration.uuid_prefix}-#{uuid_prefix}-_______________"
+ "#{Rails.configuration.ClusterID}-#{uuid_prefix}-_______________"
end
def self.uuid_regex
@@ -814,8 +814,8 @@ class ArvadosModel < ApplicationRecord
end
def is_audit_logging_enabled?
- return !(Rails.configuration.max_audit_log_age.to_i == 0 &&
- Rails.configuration.max_audit_log_delete_batch.to_i > 0)
+ return !(Rails.configuration.AuditLogs["MaxAge"].to_i == 0 &&
+ Rails.configuration.AuditLogs["MaxDeleteBatch"].to_i > 0)
end
def log_start_state
diff --git a/services/api/app/models/blob.rb b/services/api/app/models/blob.rb
index 55a257856..500a66279 100644
--- a/services/api/app/models/blob.rb
+++ b/services/api/app/models/blob.rb
@@ -51,15 +51,15 @@ class Blob
timestamp = opts[:expire]
else
timestamp = db_current_time.to_i +
- (opts[:ttl] || Rails.configuration.blob_signature_ttl)
+ (opts[:ttl] || Rails.configuration.Collections["BlobSigningTTL"])
end
timestamp_hex = timestamp.to_s(16)
# => "53163cb4"
- blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+ blob_signature_ttl = Rails.configuration.Collections["BlobSigningTTL"].to_s(16)
# Generate a signature.
signature =
- generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+ generate_signature((opts[:key] or Rails.configuration.Collections["BlobSigningKey"]),
blob_hash, opts[:api_token], timestamp_hex, blob_signature_ttl)
blob_locator + '+A' + signature + '@' + timestamp_hex
@@ -103,10 +103,10 @@ class Blob
if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)
raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'
end
- blob_signature_ttl = Rails.configuration.blob_signature_ttl.to_s(16)
+ blob_signature_ttl = Rails.configuration.Collections["BlobSigningTTL"].to_s(16)
my_signature =
- generate_signature((opts[:key] or Rails.configuration.blob_signing_key),
+ generate_signature((opts[:key] or Rails.configuration.Collections["BlobSigningKey"]),
blob_hash, opts[:api_token], timestamp, blob_signature_ttl)
if my_signature != given_signature
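A sketch of how the signed-locator pieces above fit together. The real digest comes from Blob.generate_signature; the HMAC-SHA1 message layout used here is an assumption for illustration only, and all values are made up:

require 'openssl'

blob_locator  = "acbd18db4cc2f85cedef654fccc4a4d8+3"
blob_hash     = blob_locator.split('+').first
api_token     = "examplesessiontoken"
signing_key   = "examplesigningkey"                  # Collections.BlobSigningKey
signing_ttl   = 1209600                              # Collections.BlobSigningTTL
timestamp_hex = (Time.now.to_i + signing_ttl).to_s(16)
ttl_hex       = signing_ttl.to_s(16)

# Assumed message layout, for illustration only.
message   = [blob_hash, api_token, timestamp_hex, ttl_hex].join('@')
signature = OpenSSL::HMAC.hexdigest(OpenSSL::Digest.new('sha1'), signing_key, message)
puts blob_locator + '+A' + signature + '@' + timestamp_hex
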
diff --git a/services/api/app/models/commit.rb b/services/api/app/models/commit.rb
index 921c690cd..ed3c5cd35 100644
--- a/services/api/app/models/commit.rb
+++ b/services/api/app/models/commit.rb
@@ -148,7 +148,7 @@ class Commit < ActiveRecord::Base
unless src_gitdir
raise ArgumentError.new "no local repository for #{repo_name}"
end
- dst_gitdir = Rails.configuration.git_internal_dir
+ dst_gitdir = Rails.configuration.Containers["JobsAPI"]["GitInternalDir"]
begin
commit_in_dst = must_git(dst_gitdir, "log -n1 --format=%H #{sha1.shellescape}^{commit}").strip
diff --git a/services/api/app/models/job.rb b/services/api/app/models/job.rb
index 420386cdc..deaa8ff71 100644
--- a/services/api/app/models/job.rb
+++ b/services/api/app/models/job.rb
@@ -287,7 +287,7 @@ class Job < ArvadosModel
log_reuse_info { "job #{j.uuid} has nil output" }
elsif j.log.nil?
log_reuse_info { "job #{j.uuid} has nil log" }
- elsif Rails.configuration.reuse_job_if_outputs_differ
+ elsif Rails.configuration.Containers["JobsAPI"]["ReuseJobIfOutputsDiffer"]
if !Collection.readable_by(current_user).find_by_portable_data_hash(j.output)
# Ignore: keep looking for an incomplete job or one whose
# output is readable.
@@ -493,7 +493,7 @@ class Job < ArvadosModel
def find_docker_image_locator
if runtime_constraints.is_a? Hash
runtime_constraints['docker_image'] ||=
- Rails.configuration.default_docker_image_for_jobs
+ Rails.configuration.Containers["JobsAPI"]["DefaultDockerImage"]
end
resolve_runtime_constraint("docker_image",
@@ -569,7 +569,7 @@ class Job < ArvadosModel
def trigger_crunch_dispatch_if_cancelled
if @need_crunch_dispatch_trigger
- File.open(Rails.configuration.crunch_refresh_trigger, 'wb') do
+ File.open(Rails.configuration.Containers["JobsAPI"]["CrunchRefreshTrigger"], 'wb') do
# That's all, just create/touch a file for crunch-job to see.
end
end
diff --git a/services/api/app/models/repository.rb b/services/api/app/models/repository.rb
index 48655156c..bde9d51d2 100644
--- a/services/api/app/models/repository.rb
+++ b/services/api/app/models/repository.rb
@@ -49,7 +49,7 @@ class Repository < ArvadosModel
# prefers bare repositories over checkouts.
[["%s.git"], ["%s", ".git"]].each do |repo_base, *join_args|
[:uuid, :name].each do |path_attr|
- git_dir = File.join(Rails.configuration.git_repositories_dir,
+ git_dir = File.join(Rails.configuration.Git["Repositories"],
repo_base % send(path_attr), *join_args)
return git_dir if File.exist?(git_dir)
end
@@ -108,8 +108,8 @@ class Repository < ArvadosModel
def _clone_url config_var, default_base_fmt
configured_base = Rails.configuration.send config_var
return nil if configured_base == false
- prefix = new_record? ? Rails.configuration.uuid_prefix : uuid[0,5]
- if prefix == Rails.configuration.uuid_prefix and configured_base != true
+ prefix = new_record? ? Rails.configuration.ClusterID : uuid[0,5]
+ if prefix == Rails.configuration.ClusterID and configured_base != true
base = configured_base
else
base = default_base_fmt % prefix
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index de85cc5a8..49d3afe7b 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -34,7 +34,7 @@ class User < ArvadosModel
after_create :add_system_group_permission_link
after_create :invalidate_permissions_cache
after_create :auto_setup_new_user, :if => Proc.new { |user|
- Rails.configuration.auto_setup_new_users and
+ Rails.configuration.Users["AutoSetupNewUsers"] and
(user.uuid != system_user_uuid) and
(user.uuid != anonymous_user_uuid)
}
@@ -81,7 +81,7 @@ class User < ArvadosModel
def is_invited
!!(self.is_active ||
- Rails.configuration.new_users_are_active ||
+ Rails.configuration.Users["NewUsersAreActive"] ||
self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)
end
@@ -358,15 +358,15 @@ class User < ArvadosModel
current_user.andand.is_admin or
(self == current_user &&
self.redirect_to_user_uuid.nil? &&
- self.is_active == Rails.configuration.new_users_are_active)
+ self.is_active == Rails.configuration.Users["NewUsersAreActive"])
end
def check_auto_admin
return if self.uuid.end_with?('anonymouspublic')
if (User.where("email = ?",self.email).where(:is_admin => true).count == 0 and
- Rails.configuration.auto_admin_user and self.email == Rails.configuration.auto_admin_user) or
+ Rails.configuration.Users["AutoAdminUserWithEmail"] and self.email == Rails.configuration.Users["AutoAdminUserWithEmail"]) or
(User.where("uuid not like '%-000000000000000'").where(:is_admin => true).count == 0 and
- Rails.configuration.auto_admin_first_user)
+ Rails.configuration.Users["AutoAdminFirstUser"])
self.is_admin = true
self.is_active = true
end
@@ -381,7 +381,7 @@ class User < ArvadosModel
quoted_name = self.class.connection.quote_string(basename)
next_username = basename
next_suffix = 1
- while Rails.configuration.auto_setup_name_blacklist.include?(next_username)
+ while Rails.configuration.Users["AutoSetupUsernameBlacklist"].include?(next_username)
next_suffix += 1
next_username = "%s%i" % [basename, next_suffix]
end
@@ -563,10 +563,10 @@ class User < ArvadosModel
def auto_setup_new_user
setup(openid_prefix: Rails.configuration.default_openid_prefix)
if username
- create_vm_login_permission_link(Rails.configuration.auto_setup_new_users_with_vm_uuid,
+ create_vm_login_permission_link(Rails.configuration.Users["AutoSetupNewUsersWithVmUUID"],
username)
repo_name = "#{username}/#{username}"
- if Rails.configuration.auto_setup_new_users_with_repository and
+ if Rails.configuration.Users["AutoSetupNewUsersWithRepository"] and
Repository.where(name: repo_name).first.nil?
repo = Repository.create!(name: repo_name, owner_uuid: uuid)
Link.create!(tail_uuid: uuid, head_uuid: repo.uuid,
@@ -579,7 +579,7 @@ class User < ArvadosModel
def send_profile_created_notification
if self.prefs_changed?
if self.prefs_was.andand.empty? || !self.prefs_was.andand['profile']
- profile_notification_address = Rails.configuration.user_profile_notification_address
+ profile_notification_address = Rails.configuration.Users["UserProfileNotificationAddress"]
ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address
end
end
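A sketch of the AutoSetupUsernameBlacklist suffixing loop above; the check against usernames already taken by other accounts is omitted, and the blacklist mirrors the shipped default:

BLACKLIST = %w[arvados git gitolite gitolite-admin root syslog]

def next_allowed_username(basename)
  candidate, suffix = basename, 1
  while BLACKLIST.include?(candidate)
    suffix += 1
    candidate = "%s%i" % [basename, suffix]
  end
  candidate
end

p next_allowed_username("git")     # => "git2"
p next_allowed_username("alice")   # => "alice"
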
commit d7ef7ddbfac8fe5e0641101f3be58f7e0b35dad0
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 19 17:50:18 2019 -0400
13996: Migrating defaults to new config structure
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/lib/config/config.defaults.yml b/lib/config/config.defaults.yml
new file mode 100644
index 000000000..ba9f7524e
--- /dev/null
+++ b/lib/config/config.defaults.yml
@@ -0,0 +1,370 @@
+#
+#
+Clusters:
+ xxxxx:
+ SystemRootToken: ""
+
+ # Token to be included in all healthcheck requests. Disabled by default.
+ # Server expects request header of the format "Authorization: Bearer xxx"
+ ManagementToken: ""
+
+ API:
+ # Maximum size (in bytes) allowed for a single API request. This
+ # limit is published in the discovery document for use by clients.
+ # Note: You must separately configure the upstream web server or
+ # proxy to actually enforce the desired maximum request size on the
+ # server side.
+ MaxRequestSize: 134217728
+
+ # Limit the number of bytes read from the database during an index
+ # request (by retrieving and returning fewer rows than would
+ # normally be returned in a single response).
+ # Note 1: This setting never reduces the number of returned rows to
+ # zero, no matter how big the first data row is.
+ # Note 2: Currently, this is only checked against a specific set of
+ # columns that tend to get large (collections.manifest_text,
+ # containers.mounts, workflows.definition). Other fields (e.g.,
+ # "properties" hashes) are not counted against this limit.
+ MaxIndexDatabaseRead: 134217728
+
+ # Maximum number of items to return when responding to APIs that
+ # can return partial result sets using limit and offset parameters
+ # (e.g., *.index, groups.contents). If a request specifies a "limit"
+ # parameter higher than this value, this value is used instead.
+ MaxItemsPerResponse: 1000
+
+ # API methods to disable. Disabled methods are not listed in the
+ # discovery document, and respond 404 to all requests.
+ # Example: ["jobs.create", "pipeline_instances.create"]
+ DisabledAPIs: []
+
+ Users:
+ # Config parameters to automatically set up new users. If enabled,
+ # these users will be able to self-activate. Enable this if you want
+ # to run an open instance where anyone can create an account and use
+ # the system without requiring manual approval.
+ #
+ # The params auto_setup_new_users_with_* are meaningful only when auto_setup_new_users is turned on.
+ # auto_setup_name_blacklist is a list of usernames to be blacklisted for auto setup.
+ AutoSetupNewUsers: false
+ AutoSetupNewUsersWithVmUUID: ""
+ AutoSetupNewUsersWithRepository: false
+ AutoSetupUsernameBlacklist: [arvados, git, gitolite, gitolite-admin, root, syslog]
+
+ # When new_users_are_active is set to true, new users will be active
+ # immediately. This skips the "self-activate" step which enforces
+ # user agreements. Should only be enabled for development.
+ NewUsersAreActive: false
+
+ # The e-mail address of the user you would like to become marked as an admin
+ # user on their first login.
+ # In the default configuration, authentication happens through the Arvados SSO
+ # server, which uses OAuth2 against Google's servers, so in that case this
+ # should be an address associated with a Google account.
+ AutoAdminUserWithEmail: ""
+
+ # If auto_admin_first_user is set to true, the first user to log in when no
+ # other admin users exist will automatically become an admin user.
+ AutoAdminFirstUser: false
+
+ # Email address to notify whenever a user creates a profile for the
+ # first time
+ UserProfileNotificationAddress: ""
+ AdminNotifierEmailFrom: arvados at example.com
+ EmailSubjectPrefix: "[ARVADOS] "
+ UserNotifierEmailFrom: arvados at example.com
+ NewUserNotificationRecipients: []
+ NewInactiveUserNotificationRecipients: []
+
+ AuditLogs:
+ # Time to keep audit logs, in seconds. (An audit log is a row added
+ # to the "logs" table in the PostgreSQL database each time an
+ # Arvados object is created, modified, or deleted.)
+ #
+ # Currently, websocket event notifications rely on audit logs, so
+ # this should not be set lower than 600 (5 minutes).
+ MaxAge: 1209600
+
+ # Maximum number of log rows to delete in a single SQL transaction.
+ #
+ # If max_audit_log_delete_batch is 0, log entries will never be
+ # deleted by Arvados. Cleanup can be done by an external process
+ # without affecting any Arvados system processes, as long as very
+ # recent (<5 minutes old) logs are not deleted.
+ #
+ # 100000 is a reasonable batch size for most sites.
+ MaxDeleteBatch: 0
+
+ # Attributes to suppress in events and audit logs. Notably,
+ # specifying ["manifest_text"] here typically makes the database
+ # smaller and faster.
+ #
+ # Warning: Using any non-empty value here can have undesirable side
+ # effects for any client or component that relies on event logs.
+ # Use at your own risk.
+ UnloggedAttributes: []
+
+ SystemLogs:
+ # Maximum characters of (JSON-encoded) query parameters to include
+ # in each request log entry. When params exceed this size, they will
+ # be JSON-encoded, truncated to this size, and logged as
+ # params_truncated.
+ MaxRequestLogParamsSize: 2000
+
+ Collections:
+ # Allow clients to create collections by providing a manifest with
+ # unsigned data blob locators. IMPORTANT: This effectively disables
+ # access controls for data stored in Keep: a client who knows a hash
+ # can write a manifest that references the hash, pass it to
+ # collections.create (which will create a permission link), use
+ # collections.get to obtain a signature for that data locator, and
+ # use that signed locator to retrieve the data from Keep. Therefore,
+ # do not turn this on if your users expect to keep data private from
+ # one another!
+ BlobSigning: true
+
+ # blob_signing_key is a string of alphanumeric characters used to
+ # generate permission signatures for Keep locators. It must be
+ # identical to the permission key given to Keep. IMPORTANT: This is
+ # a site secret. It should be at least 50 characters.
+ #
+ # Modifying blob_signing_key will invalidate all existing
+ # signatures, which can cause programs to fail (e.g., arv-put,
+ # arv-get, and Crunch jobs). To avoid errors, rotate keys only when
+ # no such processes are running.
+ BlobSigningKey: ""
+
+ # Default replication level for collections. This is used when a
+ # collection's replication_desired attribute is nil.
+ DefaultReplication: 2
+
+ # Lifetime (in seconds) of blob permission signatures generated by
+ # the API server. This determines how long a client can take (after
+ # retrieving a collection record) to retrieve the collection data
+ # from Keep. If the client needs more time than that (assuming the
+ # collection still has the same content and the relevant user/token
+ # still has permission) the client can retrieve the collection again
+ # to get fresh signatures.
+ #
+ # This must be exactly equal to the -blob-signature-ttl flag used by
+ # keepstore servers. Otherwise, reading data blocks and saving
+ # collections will fail with HTTP 403 permission errors.
+ #
+ # Modifying blob_signature_ttl invalidates existing signatures; see
+ # blob_signing_key note above.
+ #
+ # The default is 2 weeks.
+ BlobSigningTTL: 1209600
+
+ # Default lifetime for ephemeral collections: 2 weeks. This must not
+ # be less than blob_signature_ttl.
+ DefaultTrashLifetime: 1209600
+
+ # Interval (seconds) between trash sweeps. During a trash sweep,
+ # collections are marked as trash if their trash_at time has
+ # arrived, and deleted if their delete_at time has arrived.
+ TrashSweepInterval: 60
+
+ # Interval (seconds) between asynchronous permission view updates. Any
+ # permission-updating API called with the 'async' parameter schedules an
+ # update on the permission view in the future, if not already scheduled.
+ AsyncPermissionsUpdateInterval: 20
+
+ # If true, enable collection versioning.
+ # When a collection's preserve_version field is true or the current version
+ # is older than the amount of seconds defined on preserve_version_if_idle,
+ # a snapshot of the collection's previous state is created and linked to
+ # the current collection.
+ CollectionVersioning: false
+
+ # 0 = auto-create a new version on every update.
+ # -1 = never auto-create new versions.
+ # > 0 = auto-create a new version when older than the specified number of seconds.
+ PreserveVersionIfIdle: -1
+
+ Login:
+ # These settings are provided by your OAuth2 provider (e.g.,
+ # sso-provider).
+ ProviderAppSecret: ""
+ ProviderAppID: ""
+
+ Git:
+ # Git repositories must be readable by api server, or you won't be
+ # able to submit crunch jobs. To pass the test suites, put a clone
+ # of the arvados tree in {git_repositories_dir}/arvados.git or
+ # {git_repositories_dir}/arvados/.git
+ Repositories: /var/lib/arvados/git/repositories
+
+ Containers:
+ # List of supported Docker Registry image formats that compute nodes
+ # are able to use. `arv keep docker` will error out if a user tries
+ # to store an image with an unsupported format. Use an empty array
+ # to skip the compatibility check (and display a warning message to
+ # that effect).
+ #
+ # Example for sites running docker < 1.10: ["v1"]
+ # Example for sites running docker >= 1.10: ["v2"]
+ # Example for disabling check: []
+ SupportedDockerImageFormats: ["v2"]
+
+ # Include details about job reuse decisions in the server log. This
+ # causes additional database queries to run, so it should not be
+ # enabled unless you expect to examine the resulting logs for
+ # troubleshooting purposes.
+ LogReuseDecisions: false
+
+ # Default value for keep_cache_ram of a container's runtime_constraints.
+ DefaultKeepCacheRAM: 268435456
+
+ # Number of times a container can be unlocked before being
+ # automatically cancelled.
+ MaxDispatchAttempts: 5
+
+ # Default value for container_count_max for container requests. This is the
+ # number of times Arvados will create a new container to satisfy a container
+ # request. If a container is cancelled it will retry a new container if
+ # container_count < container_count_max on any container requests associated
+ # with the cancelled container.
+ MaxRetryAttempts: 3
+
+ # The maximum number of compute nodes that can be in use simultaneously.
+ # If this limit is reduced, any existing nodes with slot number >= new limit
+ # will not be counted against the new limit. In other words, the new limit
+ # won't be strictly enforced until those nodes with higher slot numbers
+ # go down.
+ MaxComputeVMs: 64
+
+ # Preemptible instance support (e.g. AWS Spot Instances)
+ # When true, child containers will get created with the preemptible
+ # scheduling parameter set.
+ UsePreemptibleInstances: false
+
+ Logging:
+ # When you run the db:delete_old_container_logs task, it will find
+ # containers that have been finished for at least this many seconds,
+ # and delete their stdout, stderr, arv-mount, crunch-run, and
+ # crunchstat logs from the logs table.
+ MaxAge: 30d
+
+ # These two settings control how frequently log events are flushed to the
+ # database. Log lines are buffered until either crunch_log_bytes_per_event
+ # has been reached or crunch_log_seconds_between_events has elapsed since
+ # the last flush.
+ LogBytesPerEvent: 4096
+ LogSecondsBetweenEvents: 1
+
+ # The sample period for throttling logs, in seconds.
+ LogThrottlePeriod: 60
+
+ # Maximum number of bytes that job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ LogThrottleBytes: 65536
+
+ # Maximum number of lines that job can log over crunch_log_throttle_period
+ # before being silenced until the end of the period.
+ LogThrottleLines: 1024
+
+ # Maximum bytes that may be logged by a single job. Log bytes that are
+ # silenced by throttling are not counted against this total.
+ LimitLogBytesPerJob: 67108864
+
+ LogPartialLineThrottlePeriod: 5
+
+ # Container logs are written to Keep and saved in a collection,
+ # which is updated periodically while the container runs. This
+ # value sets the interval (given in seconds) between collection
+ # updates.
+ LogUpdatePeriod: 1800
+
+ # The log collection is also updated when the specified amount of
+ # log data (given in bytes) is produced in less than one update
+ # period.
+ LogUpdateSize: 33554432
+
+ SLURM:
+ Managed:
+ # Path to dns server configuration directory
+ # (e.g. /etc/unbound.d/conf.d). If false, do not write any config
+ # files or touch restart.txt (see below).
+ DNSServerConfDir: ""
+
+ # Template file for the dns server host snippets. See
+ # unbound.template in this directory for an example. If false, do
+ # not write any config files.
+ DNSServerConfTemplate: ""
+
+ # String to write to {dns_server_conf_dir}/restart.txt (with a
+ # trailing newline) after updating local data. If false, do not
+ # open or write the restart.txt file.
+ DNSServerReloadCommand: ""
+
+ # Command to run after each DNS update. Template variables will be
+ # substituted; see the "unbound" example below. If false, do not run
+ # a command.
+ DNSServerUpdateCommand: ""
+
+ ComputeNodeDomain: ""
+ ComputeNodeNameservers:
+ - 192.168.1.1
+
+ # Hostname to assign to a compute node when it sends a "ping" and the
+ # hostname in its Node record is nil.
+ # During bootstrapping, the "ping" script is expected to notice the
+ # hostname given in the ping response, and update its unix hostname
+ # accordingly.
+ # If false, leave the hostname alone (this is appropriate if your compute
+ # nodes' hostnames are already assigned by some other mechanism).
+ #
+ # One way or another, the hostnames of your node records should agree
+ # with your DNS records and your /etc/slurm-llnl/slurm.conf files.
+ #
+ # Example for compute0000, compute0001, ....:
+ # assign_node_hostname: compute%<slot_number>04d
+ # (See http://ruby-doc.org/core-2.2.2/Kernel.html#method-i-format for more.)
+ AssignNodeHostname: compute%<slot_number>d
+
+ JobsAPI:
+ # Enable the legacy Jobs API.
+ # auto -- (default) enable the Jobs API only if it has been used before
+ # (i.e., there are job records in the database)
+ # true -- enable the Jobs API despite lack of existing records.
+ # false -- disable the Jobs API despite presence of existing records.
+ Enable: auto
+
+ # Git repositories must be readable by api server, or you won't be
+ # able to submit crunch jobs. To pass the test suites, put a clone
+ # of the arvados tree in {git_repositories_dir}/arvados.git or
+ # {git_repositories_dir}/arvados/.git
+ GitInternalDir: /var/lib/arvados/internal.git
+
+ # Docker image to be used when none found in runtime_constraints of a job
+ DefaultDockerImage: ""
+
+ # :none or :slurm_immediate
+ CrunchJobWrapper: :none
+
+ # username, or false = do not set uid when running jobs.
+ CrunchJobUser: crunch
+
+ # The web service must be able to create/write this file, and
+ # crunch-job must be able to stat() it.
+ CrunchRefreshTrigger: /tmp/crunch_refresh_trigger
+
+ # Control job reuse behavior when two completed jobs match the
+ # search criteria and have different outputs.
+ #
+ # If true, in case of a conflict, reuse the earliest job (this is
+ # similar to container reuse behavior).
+ #
+ # If false, in case of a conflict, do not reuse any completed job,
+ # but do reuse an already-running job if available (this is the
+ # original job reuse behavior, and is still the default).
+ ReuseJobIfOutputsDiffer: false
+
+ Mail: {}
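A runnable sketch of the defaults-then-site-config precedence implemented by the loader in the following commit; the inline YAML stands in for config.defaults.yml and /etc/arvados/config.yml, and the key shown is just an example:

require 'yaml'

defaults = YAML.safe_load(<<~YAML)
  Clusters:
    xxxxx:
      API:
        MaxItemsPerResponse: 1000
YAML
site_config = YAML.safe_load(<<~YAML)
  Clusters:
    xxxxx:
      API:
        MaxItemsPerResponse: 500
YAML

arvados_config = {}
[defaults, site_config].each do |confs|
  cluster_id, cluster = confs["Clusters"].first
  arvados_config["ClusterID"] = cluster_id
  arvados_config.merge!(cluster)   # note: a shallow merge, as in the WIP loader
end
p arvados_config["API"]["MaxItemsPerResponse"]   # => 500
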
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index fc091e375..f52e50089 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -46,6 +46,7 @@ config_key_map =
"max_request_size": "API.MaxRequestSize",
"max_index_database_read": "API.MaxIndexDatabaseRead",
"max_items_per_response": "API.MaxItemsPerResponse",
+ "async_permissions_update_interval": "API.AsyncPermissionsUpdateInterval",
"auto_setup_new_users": "Users.AutoSetupNewUsers",
"auto_setup_new_users_with_vm_uuid": "Users.AutoSetupNewUsersWithVmUUID",
"auto_setup_new_users_with_repository": "Users.AutoSetupNewUsersWithRepository",
@@ -72,7 +73,7 @@ config_key_map =
"trash_sweep_interval": "Collections.TrashSweepInterval",
"blob_signing_key": "Collections.BlobSigningKey",
"blob_signature_ttl": "Collections.BlobSigningTTL",
- "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning",
+ "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning", # XXX
"docker_image_formats": "Containers.SupportedDockerImageFormats",
"log_reuse_decisions": "Containers.LogReuseDecisions",
"container_default_keep_cache_ram": "Containers.DefaultKeepCacheRAM",
@@ -97,6 +98,7 @@ config_key_map =
"compute_node_domain": "Containers.SLURM.Managed.ComputeNodeDomain",
"compute_node_nameservers": "Containers.SLURM.Managed.ComputeNodeNameservers",
"assign_node_hostname": "Containers.SLURM.Managed.AssignNodeHostname",
+ "enable_legacy_jobs_api": "Containers.JobsAPI.Enable",
"crunch_job_wrapper": "Containers.JobsAPI.CrunchJobWrapper",
"crunch_job_user": "Containers.JobsAPI.CrunchJobUser",
"crunch_refresh_trigger": "Containers.JobsAPI.CrunchRefreshTrigger",
@@ -141,6 +143,8 @@ application_config.each do |k, v|
end
end
+puts $arvados_config.to_yaml
+
Server::Application.configure do
nils = []
$arvados_config.each do |k, v|
commit 91545d0a1fb40c79eef0f3013f832734f7a9695e
Author: Peter Amstutz <pamstutz at veritasgenetics.com>
Date: Tue Mar 19 16:40:19 2019 -0400
13996: Config migration WIP
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <pamstutz at veritasgenetics.com>
diff --git a/services/api/config/config.defaults.yml b/services/api/config/config.defaults.yml
new file mode 120000
index 000000000..3a43d4bcd
--- /dev/null
+++ b/services/api/config/config.defaults.yml
@@ -0,0 +1 @@
+../../../lib/config/config.defaults.yml
\ No newline at end of file
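Editorial note: because the new file mode is 120000, git records config.defaults.yml as a symlink whose blob content is just the link target shown above, a relative path back into lib/config/. A quick Ruby check of what that resolves to (the working directory of services/api is an assumption):

    # Run from a services/api checkout (assumption, not stated in the patch).
    target = File.readlink("config/config.defaults.yml")
    puts target                               # => ../../../lib/config/config.defaults.yml
    puts File.expand_path(target, "config")   # resolves to <tree root>/lib/config/config.defaults.yml
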
diff --git a/services/api/config/initializers/load_config.rb b/services/api/config/initializers/load_config.rb
index 16059cad7..fc091e375 100644
--- a/services/api/config/initializers/load_config.rb
+++ b/services/api/config/initializers/load_config.rb
@@ -26,8 +26,88 @@ EOS
WARNED_OMNIAUTH_CONFIG = true
end
-$application_config = {}
+$arvados_config = {}
+["#{::Rails.root.to_s}/config/config.defaults.yml", "/etc/arvados/config.yml"].each do |path|
+ if File.exist? path
+ confs = YAML.load(IO.read(path), deserialize_symbols: false)
+ if confs
+ clusters = confs["Clusters"].first
+ $arvados_config["ClusterID"] = clusters[0]
+ $arvados_config.merge!(clusters[1])
+ end
+ end
+end
+
+config_key_map =
+ {
+ "git_repositories_dir": "Git.Repositories",
+ "disable_api_methods": "API.DisabledAPIs",
+ "max_request_size": "API.MaxRequestSize",
+ "max_index_database_read": "API.MaxIndexDatabaseRead",
+ "max_items_per_response": "API.MaxItemsPerResponse",
+ "auto_setup_new_users": "Users.AutoSetupNewUsers",
+ "auto_setup_new_users_with_vm_uuid": "Users.AutoSetupNewUsersWithVmUUID",
+ "auto_setup_new_users_with_repository": "Users.AutoSetupNewUsersWithRepository",
+ "auto_setup_name_blacklist": "Users.AutoSetupUsernameBlacklist",
+ "new_users_are_active": "Users.NewUsersAreActive",
+ "auto_admin_user": "Users.AutoAdminUserWithEmail",
+ "auto_admin_first_user": "Users.AutoAdminFirstUser",
+ "user_profile_notification_address": "Users.UserProfileNotificationAddress",
+ "admin_notifier_email_from": "Users.AdminNotifierEmailFrom",
+ "email_subject_prefix": "Users.EmailSubjectPrefix",
+ "user_notifier_email_from": "Users.UserNotifierEmailFrom",
+ "new_user_notification_recipients": "Users.NewUserNotificationRecipients",
+ "new_inactive_user_notification_recipients": "Users.NewInactiveUserNotificationRecipients",
+ "sso_app_secret": "Login.ProviderAppSecret",
+ "sso_app_id": "Login.ProviderAppID",
+ "max_audit_log_age": "AuditLogs.MaxAge",
+ "max_audit_log_delete_batch": "AuditLogs.MaxDeleteBatch",
+ "unlogged_attributes": "AuditLogs.UnloggedAttributes",
+ "max_request_log_params_size": "SystemLogs.MaxRequestLogParamsSize",
+ "default_collection_replication": "Collections.DefaultReplication",
+ "default_trash_lifetime": "Collections.DefaultTrashLifetime",
+ "collection_versioning": "Collections.CollectionVersioning",
+ "preserve_version_if_idle": "Collections.PreserveVersionIfIdle",
+ "trash_sweep_interval": "Collections.TrashSweepInterval",
+ "blob_signing_key": "Collections.BlobSigningKey",
+ "blob_signature_ttl": "Collections.BlobSigningTTL",
+ "permit_create_collection_with_unsigned_manifest": "Collections.BlobSigning",
+ "docker_image_formats": "Containers.SupportedDockerImageFormats",
+ "log_reuse_decisions": "Containers.LogReuseDecisions",
+ "container_default_keep_cache_ram": "Containers.DefaultKeepCacheRAM",
+ "max_container_dispatch_attempts": "Containers.MaxDispatchAttempts",
+ "container_count_max": "Containers.MaxRetryAttempts",
+ "preemptible_instances": "Containers.UsePreemptibleInstances",
+ "max_compute_nodes": "Containers.MaxComputeVMs",
+ "crunch_log_bytes_per_event": "Containers.Logging.LogBytesPerEvent",
+ "crunch_log_seconds_between_events": "Containers.Logging.LogSecondsBetweenEvents",
+ "crunch_log_throttle_period": "Containers.Logging.LogThrottlePeriod",
+ "crunch_log_throttle_bytes": "Containers.Logging.LogThrottleBytes",
+ "crunch_log_throttle_lines": "Containers.Logging.LogThrottleLines",
+ "crunch_limit_log_bytes_per_job": "Containers.Logging.LimitLogBytesPerJob",
+ "crunch_log_partial_line_throttle_period": "Containers.Logging.LogPartialLineThrottlePeriod",
+ "crunch_log_update_period": "Containers.Logging.LogUpdatePeriod",
+ "crunch_log_update_size": "Containers.Logging.LogUpdateSize",
+ "clean_container_log_rows_after": "Containers.Logging.MaxAge",
+ "dns_server_conf_dir": "Containers.SLURM.Managed.DNSServerConfDir",
+ "dns_server_conf_template": "Containers.SLURM.Managed.DNSServerConfTemplate",
+ "dns_server_reload_command": "Containers.SLURM.Managed.DNSServerReloadCommand",
+ "dns_server_update_command": "Containers.SLURM.Managed.DNSServerUpdateCommand",
+ "compute_node_domain": "Containers.SLURM.Managed.ComputeNodeDomain",
+ "compute_node_nameservers": "Containers.SLURM.Managed.ComputeNodeNameservers",
+ "assign_node_hostname": "Containers.SLURM.Managed.AssignNodeHostname",
+ "crunch_job_wrapper": "Containers.JobsAPI.CrunchJobWrapper",
+ "crunch_job_user": "Containers.JobsAPI.CrunchJobUser",
+ "crunch_refresh_trigger": "Containers.JobsAPI.CrunchRefreshTrigger",
+ "git_internal_dir": "Containers.JobsAPI.GitInternalDir",
+ "reuse_job_if_outputs_differ": "Containers.JobsAPI.ReuseJobIfOutputsDiffer",
+ "default_docker_image_for_jobs": "Containers.JobsAPI.DefaultDockerImage",
+ "mailchimp_api_key": "Mail.MailchimpAPIKey",
+ "mailchimp_list_id": "Mail.MailchimpListID",
+}
+
+application_config = {}
%w(application.default application).each do |cfgfile|
path = "#{::Rails.root.to_s}/config/#{cfgfile}.yml"
if File.exist? path
@@ -35,26 +115,41 @@ $application_config = {}
confs = YAML.load(yaml, deserialize_symbols: true)
# Ignore empty YAML file:
next if confs == false
- $application_config.merge!(confs['common'] || {})
- $application_config.merge!(confs[::Rails.env.to_s] || {})
+ application_config.merge!(confs['common'] || {})
+ application_config.merge!(confs[::Rails.env.to_s] || {})
+ end
+end
+
+application_config.each do |k, v|
+ cfg = $arvados_config
+
+ if config_key_map[k.to_sym]
+ k = config_key_map[k.to_sym]
+ end
+
+ # "foo.bar: baz" --> { config.foo.bar = baz }
+ ks = k.split '.'
+ k = ks.pop
+ ks.each do |kk|
+ cfg = cfg[kk]
+ if cfg.nil?
+ break
+ end
+ end
+ if !cfg.nil?
+ cfg[k] = v
end
end
Server::Application.configure do
nils = []
- $application_config.each do |k, v|
- # "foo.bar: baz" --> { config.foo.bar = baz }
+ $arvados_config.each do |k, v|
cfg = config
- ks = k.split '.'
- k = ks.pop
- ks.each do |kk|
- cfg = cfg.send(kk)
- end
if cfg.respond_to?(k.to_sym) and !cfg.send(k).nil?
- # Config must have been set already in environments/*.rb.
- #
- # After config files have been migrated, this mechanism should
- # be deprecated, then removed.
+ # Config must have been set already in environments/*.rb.
+ #
+ # After config files have been migrated, this mechanism should
+ # be deprecated, then removed.
elsif v.nil?
# Config variables are not allowed to be nil. Make a "naughty"
# list, and present it below.
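Editorial note: the replaced block above is the core of the migration. Each legacy key, after optional remapping through config_key_map, is split on ".", the leading components are used to walk into the nested $arvados_config hash, and the value is assigned at the leaf; if any intermediate section is missing, the key is silently skipped. A self-contained sketch of that walk (the toy hash and its values are invented for illustration):

    # Toy nested hash standing in for $arvados_config.
    cfg_root = {
      "Containers" => { "JobsAPI" => { "Enable" => "auto" } },
      "API"        => { "MaxRequestSize" => 134217728 },
    }

    def set_dotted(root, dotted_key, value)
      cfg = root
      ks = dotted_key.split "."
      k = ks.pop
      ks.each do |kk|
        cfg = cfg[kk]
        break if cfg.nil?          # unknown section: give up, as the patch does
      end
      cfg[k] = value unless cfg.nil?
    end

    set_dotted(cfg_root, "Containers.JobsAPI.Enable", true)
    set_dotted(cfg_root, "NoSuch.Section", 42)      # silently ignored
    p cfg_root["Containers"]["JobsAPI"]["Enable"]   # => true
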
diff --git a/services/api/lib/tasks/config_dump.rake b/services/api/lib/tasks/config_dump.rake
index ed349600b..4532225a3 100644
--- a/services/api/lib/tasks/config_dump.rake
+++ b/services/api/lib/tasks/config_dump.rake
@@ -5,6 +5,6 @@
namespace :config do
desc 'Show site configuration'
task dump: :environment do
- puts $application_config.to_yaml
+ puts $arvados_config.to_yaml
end
end
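Editorial note: with this change the config:dump task reports the merged hierarchical $arvados_config instead of the old flat $application_config. A hedged usage sketch, using the standard Rails rake invocation (not spelled out in this patch):

    cd services/api
    bundle exec rake config:dump

The output should be YAML whose top-level keys match the sections of config.defaults.yml, with any overrides from /etc/arvados/config.yml and legacy application.yml applied.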
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits mailing list