[arvados] updated: 2.6.0-363-gea3ab8451
git repository hosting
git at public.arvados.org
Fri Aug 4 18:08:28 UTC 2023
Summary of changes:
doc/install/salt-multi-host.html.textile.liquid | 47 ++++++++--------------
.../multi_host/aws/pillars/postgresql.sls | 4 +-
.../local.params.example.multiple_hosts | 10 ++---
tools/salt-install/provision.sh | 4 +-
.../terraform/aws/vpc/terraform.tfvars | 29 ++++++++-----
tools/salt-install/terraform/aws/vpc/variables.tf | 2 +-
6 files changed, 46 insertions(+), 50 deletions(-)
via ea3ab84513005da15795894bf8a85eb06b131ed0 (commit)
via 34a5530a0844fb66270e54f4e53fb7179746a0c0 (commit)
from d9152312a29c8b8e257cc0c42afd43801eafd7f5 (commit)
Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.
commit ea3ab84513005da15795894bf8a85eb06b131ed0
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Fri Aug 4 15:08:04 2023 -0300
20610: Documentation changes, including simplification & fixes.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/doc/install/salt-multi-host.html.textile.liquid b/doc/install/salt-multi-host.html.textile.liquid
index e9bc96705..cad067544 100644
--- a/doc/install/salt-multi-host.html.textile.liquid
+++ b/doc/install/salt-multi-host.html.textile.liquid
@@ -230,18 +230,17 @@ The installer will set up the Arvados services on your machines. Here is the de
## postgresql server
## arvados api server
## arvados controller (recommendend hostname @controller.${DOMAIN}@)
-## arvados websocket (recommendend hostname @ws.${DOMAIN}@)
-## arvados cloud dispatcher
-## arvados keepbalance
# KEEPSTORE nodes (at least 1 if using S3 as a Keep backend, else 2)
## arvados keepstore (recommendend hostnames @keep0.${DOMAIN}@ and @keep1.${DOMAIN}@)
-# KEEPPROXY node
-## arvados keepproxy (recommendend hostname @keep.${DOMAIN}@)
-## arvados keepweb (recommendend hostname @download.${DOMAIN}@ and @*.collections.${DOMAIN}@)
# WORKBENCH node
## arvados workbench (recommendend hostname @workbench.${DOMAIN}@)
## arvados workbench2 (recommendend hostname @workbench2.${DOMAIN}@)
## arvados webshell (recommendend hostname @webshell.${DOMAIN}@)
+## arvados websocket (recommended hostname @ws.${DOMAIN}@)
+## arvados cloud dispatcher
+## arvados keepbalance
+## arvados keepproxy (recommended hostname @keep.${DOMAIN}@)
+## arvados keepweb (recommended hostname @download.${DOMAIN}@ and @*.collections.${DOMAIN}@)
# SHELL node (optional)
## arvados shell (recommended hostname @shell.${DOMAIN}@)
@@ -483,20 +482,20 @@ Once logged in, you will want to add the dashboards to the front page.
h2(#load_balancing). Load balancing controllers (optional)
-In order to handle high loads and perform rolling upgrades, the controller & api services can be scaled to a number of hosts and the installer make this implementation a fairly simple task.
+In order to handle high loads and perform rolling upgrades, the controller service can be scaled to a number of hosts and the installer makes this implementation a fairly simple task.
First, you should take care of the infrastructure deployment: if you use our Terraform code, you will need to set up the @terraform.tfvars@ in @terraform/vpc/@ so that in addition to the node named @controller@ (the load-balancer), a number of @controllerN@ nodes (backends) are defined as needed, and added to the @internal_service_hosts@ list.
-We suggest that the backend nodes just hold the controller & api services and nothing else, so they can be easily created or destroyed as needed without other service disruption. Because of this, you will need to set up a custom @dns_aliases@ variable map.
+We suggest that the backend nodes just hold the controller service and nothing else, so they can be easily created or destroyed as needed without other service disruption.
-The following is an example @terraform/vpc/terraform.tfvars@ file that describes a cluster with a load-balancer, 2 backend nodes, a separate database node, a keepstore node and a workbench node that will also hold other miscelaneous services:
+The following is an example @terraform/vpc/terraform.tfvars@ file that describes a cluster with a load-balancer, 2 backend nodes, a separate database node, a shell node, a keepstore node and a workbench node that will also hold other miscellaneous services:
<pre><code>region_name = "us-east-1"
cluster_name = "xarv1"
domain_name = "xarv1.example.com"
# Include controller nodes in this list so instances are assigned to the
# private subnet. Only the balancer node should be connecting to them.
-internal_service_hosts = [ "keep0", "database", "controller1", "controller2" ]
+internal_service_hosts = [ "keep0", "shell", "database", "controller1", "controller2" ]
# Assign private IPs for the controller nodes. These will be used to create
# internal DNS resolutions that will get used by the balancer and database nodes.
@@ -506,36 +505,22 @@ private_ip = {
database = "10.1.2.12"
controller1 = "10.1.2.21"
controller2 = "10.1.2.22"
+ shell = "10.1.2.17"
keep0 = "10.1.2.13"
-}
-
-# Some services that used to run on the non-balanced controller node need to be
-# moved to another. Here we assign DNS aliases because they will run on the
-# workbench node.
-dns_aliases = {
- workbench = [
- "ws",
- "workbench2",
- "keep",
- "download",
- "prometheus",
- "grafana",
- "*.collections"
- ]
}</code></pre>
-Once the infrastructure is deployed, you'll then need to define which node will be using the @balancer@ role and which will be the @controller@ nodes in @local.params@, as it's being shown in this partial example. Note how the workbench node got the majority of the other roles, reflecting the above terraform configuration example:
+Once the infrastructure is deployed, you'll then need to define which node will be using the @balancer@ role and which will be the @controller@ nodes in @local.params@, as shown in this partial example:
-<pre><code>...
-NODES=(
+<pre><code>NODES=(
[controller.${DOMAIN}]=balancer
[controller1.${DOMAIN}]=controller
[controller2.${DOMAIN}]=controller
[database.${DOMAIN}]=database
- [workbench.${DOMAIN}]=monitoring,workbench,workbench2,keepproxy,keepweb,websocket,keepbalance,dispatcher
- [keep0.${DOMAIN}]=keepstore
+ ...
)
-...</code></pre>
+</code></pre>
+
+Note that we also set the @database@ role to its own node.
h3(#rolling-upgrades). Rolling upgrades procedure
diff --git a/tools/salt-install/terraform/aws/vpc/terraform.tfvars b/tools/salt-install/terraform/aws/vpc/terraform.tfvars
index bbc5f8495..867034624 100644
--- a/tools/salt-install/terraform/aws/vpc/terraform.tfvars
+++ b/tools/salt-install/terraform/aws/vpc/terraform.tfvars
@@ -27,18 +27,29 @@
# Optional cluster service nodes configuration:
#
-# List of node names which either will be hosting user-facing or internal services
-# user_facing_hosts = ["node1", "node2", ...]
-# internal_service_hosts = ["node3", ...]
+# List of node names which either will be hosting user-facing or internal
+# services. Defaults:
+# user_facing_hosts = [ "controller", "workbench" ]
+# internal_service_hosts = [ "keep0", "shell" ]
#
-# Map assigning each node name an internal IP address
+# Map assigning each node name an internal IP address. Defaults:
# private_ip = {
-# node1 = "1.2.3.4"
-# ...
+# controller = "10.1.1.11"
+# workbench = "10.1.1.15"
+# shell = "10.1.2.17"
+# keep0 = "10.1.2.13"
# }
#
-# Map assigning DNS aliases for service node names
+# Map assigning DNS aliases for service node names. Defaults:
# dns_aliases = {
-# node1 = ["alias1", "alias2", ...]
-# ...
+# workbench = [
+# "ws",
+# "workbench2",
+# "webshell",
+# "keep",
+# "download",
+# "prometheus",
+# "grafana",
+# "*.collections"
+# ]
# }
\ No newline at end of file
commit 34a5530a0844fb66270e54f4e53fb7179746a0c0
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Fri Aug 4 14:48:29 2023 -0300
20610: Changes the default installer config in order to simplify documentation.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
index cc9d7eedf..70edfeb8d 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls
@@ -5,7 +5,7 @@
{%- set domain = "__DOMAIN__" %}
{%- set controller_nodes = "__CONTROLLER_NODES__".split(",") %}
-{%- set dispatcher_ip = "__DISPATCHER_INT_IP__" %}
+{%- set websocket_ip = "__WEBSOCKET_INT_IP__" %}
{%- set keepbalance_ip = "__KEEPBALANCE_INT_IP__" %}
### POSTGRESQL
@@ -22,7 +22,7 @@ postgres:
- ['host', 'all', 'all', '127.0.0.1/32', 'md5']
- ['host', 'all', 'all', '::1/128', 'md5']
- ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.1/32']
- - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ dispatcher_ip }}/32']
+ - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ websocket_ip }}/32']
- ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ keepbalance_ip }}/32']
{%- for controller_hostname in controller_nodes %}
{%- set controller_ip = salt['cmd.run']("getent hosts "+controller_hostname+" | awk '{print $1 ; exit}'", python_shell=True) %}
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 2c3d3c616..fde79cc25 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -96,8 +96,8 @@ MONITORING_EMAIL=${INITIAL_USER_EMAIL}
# installer.sh will log in to each of these nodes and then provision
# it for the specified roles.
NODES=(
- [controller.${DOMAIN}]=database,controller,websocket,dispatcher,keepbalance
- [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb
+ [controller.${DOMAIN}]=database,controller
+ [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance
[keep0.${DOMAIN}]=keepstore
[shell.${DOMAIN}]=shell
)
@@ -121,11 +121,11 @@ CLUSTER_INT_CIDR=10.1.0.0/16
# Note the IPs in this example are shared between roles, as suggested in
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
-DISPATCHER_INT_IP=${CONTROLLER_INT_IP}
-KEEPBALANCE_INT_IP=${CONTROLLER_INT_IP}
-WEBSOCKET_INT_IP=${CONTROLLER_INT_IP}
DATABASE_INT_IP=${CONTROLLER_INT_IP}
WORKBENCH1_INT_IP=10.1.1.15
+DISPATCHER_INT_IP=${WORKBENCH1_INT_IP}
+KEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}
+WEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}
# Both for collections and downloads
KEEPWEB_INT_IP=${WORKBENCH1_INT_IP}
WORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index eefd0572a..bf76b2abf 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -209,8 +209,8 @@ apply_var_substitutions() {
s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\n'/\\n}#g;
s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;
s#__DISABLED_CONTROLLER__#${DISABLED_CONTROLLER}#g;
- s#__BALANCER_NODENAME__#${ROLE2NODES['balancer']}#g;
- s#__PROMETHEUS_NODENAME__#${ROLE2NODES['monitoring']}#g;
+ s#__BALANCER_NODENAME__#${ROLE2NODES['balancer']:-}#g;
+ s#__PROMETHEUS_NODENAME__#${ROLE2NODES['monitoring']:-}#g;
s#__CONTROLLER_NODES__#${ROLE2NODES['controller']}#g;
s#__NODELIST__#${NODELIST}#g;
s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;
diff --git a/tools/salt-install/terraform/aws/vpc/variables.tf b/tools/salt-install/terraform/aws/vpc/variables.tf
index b91cc4214..c8d366a19 100644
--- a/tools/salt-install/terraform/aws/vpc/variables.tf
+++ b/tools/salt-install/terraform/aws/vpc/variables.tf
@@ -54,8 +54,8 @@ variable "dns_aliases" {
description = "Sets DNS name aliases for every service node"
type = map(list(string))
default = {
- controller = ["ws"]
workbench = [
+ "ws",
"workbench2",
"webshell",
"keep",
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits
mailing list