[arvados] created: 2.5.0-309-g7aeaedbd8
git repository hosting <git@public.arvados.org>
Tue Mar 28 17:22:15 UTC 2023
at 7aeaedbd8009c596bfc159432bb7b1f09c19ed72 (commit)
commit 7aeaedbd8009c596bfc159432bb7b1f09c19ed72
Author: Lucas Di Pentima <lucas.dipentima@curii.com>
Date: Tue Mar 28 14:19:57 2023 -0300
20270: Removes keep1 & keepproxy nodes. Uses SSH jumphost to deploy nodes.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima@curii.com>
diff --git a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
index 25f68ca04..b33282f18 100644
--- a/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls
@@ -169,7 +169,6 @@ arvados:
Keepstore:
InternalURLs:
'http://__KEEPSTORE0_INT_IP__:25107': {}
- 'http://__KEEPSTORE1_INT_IP__:25107': {}
RailsAPI:
InternalURLs:
'http://localhost:8004': {}
diff --git a/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls b/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
index 6e0deb49c..68aeab3ab 100644
--- a/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
+++ b/tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls
@@ -69,9 +69,3 @@ extra_extra_hosts_entries_etc_hosts_keep0_host_present:
- ip: __KEEPSTORE0_INT_IP__
- names:
- keep0.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
-
-extra_extra_hosts_entries_etc_hosts_keep1_host_present:
- host.present:
- - ip: __KEEPSTORE1_INT_IP__
- - names:
- - keep1.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index e06c0237a..0f1d16dde 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -43,6 +43,10 @@ declare DEPLOY_USER
# This will be populated by loadconfig()
declare GITTARGET
+# The public host used as an SSH jump host
+# This will be populated by loadconfig()
+declare USE_SSH_JUMPHOST
+
checktools() {
local MISSING=''
for a in git ip ; do
@@ -64,31 +68,33 @@ sync() {
# each node, pushing our branch, and updating the checkout.
if [[ "$NODE" != localhost ]] ; then
- if ! ssh $DEPLOY_USER@$NODE test -d ${GITTARGET}.git ; then
-
- # Initialize the git repository (1st time case). We're
- # actually going to make two repositories here because git
- # will complain if you try to push to a repository with a
- # checkout. So we're going to create a "bare" repository
- # and then clone a regular repository (with a checkout)
- # from that.
-
- ssh $DEPLOY_USER@$NODE git init --bare --shared=0600 ${GITTARGET}.git
- if ! git remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git ; then
- git remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git
- fi
- git push $NODE $BRANCH
- ssh $DEPLOY_USER@$NODE "umask 0077 && git clone ${GITTARGET}.git ${GITTARGET}"
- fi
+ SSH=`ssh_cmd "$NODE"`
+ GIT="eval `git_cmd $NODE`"
+ if ! $SSH $DEPLOY_USER@$NODE test -d ${GITTARGET}.git ; then
+
+ # Initialize the git repository (1st time case). We're
+ # actually going to make two repositories here because git
+ # will complain if you try to push to a repository with a
+ # checkout. So we're going to create a "bare" repository
+ # and then clone a regular repository (with a checkout)
+ # from that.
+
+ $SSH $DEPLOY_USER@$NODE git init --bare --shared=0600 ${GITTARGET}.git
+ if ! $GIT remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git ; then
+ $GIT remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git
+ fi
+ $GIT push $NODE $BRANCH
+ $SSH $DEPLOY_USER@$NODE "umask 0077 && git clone ${GITTARGET}.git ${GITTARGET}"
+ fi
- # The update case.
- #
- # Push to the bare repository on the remote node, then in the
- # remote node repository with the checkout, pull the branch
- # from the bare repository.
+ # The update case.
+ #
+ # Push to the bare repository on the remote node, then in the
+ # remote node repository with the checkout, pull the branch
+ # from the bare repository.
- git push $NODE $BRANCH
- ssh $DEPLOY_USER@$NODE "git -C ${GITTARGET} checkout ${BRANCH} && git -C ${GITTARGET} pull"
+ $GIT push $NODE $BRANCH
+ $SSH $DEPLOY_USER@$NODE "git -C ${GITTARGET} checkout ${BRANCH} && git -C ${GITTARGET} pull"
fi
}
@@ -100,32 +106,47 @@ deploynode() {
# the appropriate roles.
if [[ -z "$ROLES" ]] ; then
- echo "No roles specified for $NODE, will deploy all roles"
+ echo "No roles specified for $NODE, will deploy all roles"
else
- ROLES="--roles ${ROLES}"
+ ROLES="--roles ${ROLES}"
fi
logfile=deploy-${NODE}-$(date -Iseconds).log
+ SSH=`ssh_cmd "$NODE"`
if [[ "$NODE" = localhost ]] ; then
SUDO=''
- if [[ $(whoami) != 'root' ]] ; then
- SUDO=sudo
- fi
- $SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile
- else
- ssh $DEPLOY_USER@$NODE "cd ${GITTARGET} && sudo ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
+ if [[ $(whoami) != 'root' ]] ; then
+ SUDO=sudo
+ fi
+ $SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile
+ else
+ $SSH $DEPLOY_USER@$NODE "cd ${GITTARGET} && sudo ./provision.sh --config ${CONFIG_FILE} ${ROLES}" 2>&1 | tee $logfile
fi
}
loadconfig() {
if [[ ! -s $CONFIG_FILE ]] ; then
- echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
+ echo "Must be run from initialized setup dir, maybe you need to 'initialize' first?"
fi
source ${CONFIG_FILE}
GITTARGET=arvados-deploy-config-${CLUSTER}
}
+ssh_cmd() {
+ local NODE=$1
+ if [ -z "${USE_SSH_JUMPHOST}" -o "${NODE}" == "${USE_SSH_JUMPHOST}" -o "${NODE}" == "localhost" ]; then
+ echo "ssh"
+ else
+ echo "ssh -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}"
+ fi
+}
+
+git_cmd() {
+ local NODE=$1
+ echo "GIT_SSH_COMMAND=\"`ssh_cmd ${NODE}`\" git"
+}
+
set +u
subcmd="$1"
set -u
@@ -208,9 +229,9 @@ case "$subcmd" in
terraform)
logfile=terraform-$(date -Iseconds).log
- (cd terraform/vpc && terraform apply) 2>&1 | tee -a $logfile
- (cd terraform/data-storage && terraform apply) 2>&1 | tee -a $logfile
- (cd terraform/services && terraform apply) 2>&1 | grep -v letsencrypt_iam_secret_access_key | tee -a $logfile
+ (cd terraform/vpc && terraform apply -auto-approve) 2>&1 | tee -a $logfile
+ (cd terraform/data-storage && terraform apply -auto-approve) 2>&1 | tee -a $logfile
+ (cd terraform/services && terraform apply -auto-approve) 2>&1 | grep -v letsencrypt_iam_secret_access_key | tee -a $logfile
(cd terraform/services && echo -n 'letsencrypt_iam_secret_access_key = ' && terraform output letsencrypt_iam_secret_access_key) 2>&1 | tee -a $logfile
;;
diff --git a/tools/salt-install/local.params.example.multiple_hosts b/tools/salt-install/local.params.example.multiple_hosts
index 0064a78c5..01a321c4a 100644
--- a/tools/salt-install/local.params.example.multiple_hosts
+++ b/tools/salt-install/local.params.example.multiple_hosts
@@ -21,10 +21,8 @@ DEPLOY_USER=root
# it for the specified roles.
NODES=(
[controller.${CLUSTER}.${DOMAIN}]=database,api,controller,websocket,dispatcher,keepbalance
+ [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell,keepproxy,keepweb
[keep0.${CLUSTER}.${DOMAIN}]=keepstore
- [keep1.${CLUSTER}.${DOMAIN}]=keepstore
- [keep.${CLUSTER}.${DOMAIN}]=keepproxy,keepweb
- [workbench.${CLUSTER}.${DOMAIN}]=workbench,workbench2,webshell
[shell.${CLUSTER}.${DOMAIN}]=shell
)
@@ -48,16 +46,15 @@ CLUSTER_INT_CIDR=10.1.0.0/16
# https://doc.arvados.org/main/install/salt-multi-host.html
CONTROLLER_INT_IP=10.1.1.11
WEBSOCKET_INT_IP=10.1.1.11
-KEEP_INT_IP=10.1.1.12
+KEEP_INT_IP=10.1.1.15
# Both for collections and downloads
-KEEPWEB_INT_IP=10.1.1.12
-KEEPSTORE0_INT_IP=10.1.1.13
-KEEPSTORE1_INT_IP=10.1.1.14
+KEEPWEB_INT_IP=10.1.1.15
+KEEPSTORE0_INT_IP=10.1.2.13
WORKBENCH1_INT_IP=10.1.1.15
WORKBENCH2_INT_IP=10.1.1.15
WEBSHELL_INT_IP=10.1.1.15
DATABASE_INT_IP=10.1.1.11
-SHELL_INT_IP=10.1.1.17
+SHELL_INT_IP=10.1.2.17
INITIAL_USER="admin"
@@ -66,6 +63,12 @@ INITIAL_USER="admin"
INITIAL_USER_EMAIL="admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work"
INITIAL_USER_PASSWORD="fixmepassword"
+# Use a public node as a jump host for SSH sessions. This allows running the
+# installer from the outside of the cluster's local network and still reach
+# the internal servers for configuration deployment.
+# Comment out to disable.
+USE_SSH_JUMPHOST="controller.${CLUSTER}.${DOMAIN}"
+
# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS
BLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters
MANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters
diff --git a/tools/salt-install/provision.sh b/tools/salt-install/provision.sh
index 86335ff8e..05a41ded6 100755
--- a/tools/salt-install/provision.sh
+++ b/tools/salt-install/provision.sh
@@ -428,7 +428,6 @@ for f in $(ls "${SOURCE_PILLARS_DIR}"/*); do
s#__WEBSOCKET_INT_IP__#${WEBSOCKET_INT_IP}#g;
s#__KEEP_INT_IP__#${KEEP_INT_IP}#g;
s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g;
- s#__KEEPSTORE1_INT_IP__#${KEEPSTORE1_INT_IP}#g;
s#__KEEPWEB_INT_IP__#${KEEPWEB_INT_IP}#g;
s#__WEBSHELL_INT_IP__#${WEBSHELL_INT_IP}#g;
s#__SHELL_INT_IP__#${SHELL_INT_IP}#g;
@@ -498,7 +497,6 @@ if [ -d "${SOURCE_STATES_DIR}" ]; then
s#__WEBSOCKET_INT_IP__#${WEBSOCKET_INT_IP}#g;
s#__KEEP_INT_IP__#${KEEP_INT_IP}#g;
s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g;
- s#__KEEPSTORE1_INT_IP__#${KEEPSTORE1_INT_IP}#g;
s#__KEEPWEB_INT_IP__#${KEEPWEB_INT_IP}#g;
s#__WEBSHELL_INT_IP__#${WEBSHELL_INT_IP}#g;
s#__WORKBENCH1_INT_IP__#${WORKBENCH1_INT_IP}#g;
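To make the jump-host change above concrete, here is roughly what the new ssh_cmd/git_cmd helpers expand to for a node on the private subnet. The hostnames, user, cluster id and branch below are hypothetical placeholders (USE_SSH_JUMPHOST=controller.xarv1.example.com, DEPLOY_USER=admin, CLUSTER=xarv1, BRANCH=main), not values taken from the commit:

  # ssh_cmd "shell.xarv1.example.com" prints:
  ssh -J admin@controller.xarv1.example.com

  # so for that node, sync() effectively runs:
  GIT_SSH_COMMAND="ssh -J admin@controller.xarv1.example.com" \
    git push shell.xarv1.example.com main
  ssh -J admin@controller.xarv1.example.com admin@shell.xarv1.example.com \
    "git -C arvados-deploy-config-xarv1 checkout main && git -C arvados-deploy-config-xarv1 pull"

  # For localhost, or for the jump host itself, ssh_cmd prints plain "ssh"
  # and no -J hop is added.
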
commit 187979032338beb7656e56ab21944d53fd7f4569
Author: Lucas Di Pentima <lucas.dipentima@curii.com>
Date: Tue Mar 28 14:17:30 2023 -0300
20270: Refactors the VPC code so that private nodes can access the Internet.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima@curii.com>
diff --git a/tools/salt-install/terraform/aws/services/main.tf b/tools/salt-install/terraform/aws/services/main.tf
index 457aabc31..7ec3b954e 100644
--- a/tools/salt-install/terraform/aws/services/main.tf
+++ b/tools/salt-install/terraform/aws/services/main.tf
@@ -52,7 +52,7 @@ resource "aws_instance" "arvados_service" {
"hostname": each.value
})
private_ip = local.private_ip[each.value]
- subnet_id = data.terraform_remote_state.vpc.outputs.arvados_subnet_id
+ subnet_id = contains(local.public_hosts, each.value) ? data.terraform_remote_state.vpc.outputs.public_subnet_id : data.terraform_remote_state.vpc.outputs.private_subnet_id
vpc_security_group_ids = [ data.terraform_remote_state.vpc.outputs.arvados_sg_id ]
# This should be done in a more readable way
iam_instance_profile = each.value == "controller" ? aws_iam_instance_profile.dispatcher_instance_profile.name : length(regexall("^keep[0-9]+", each.value)) > 0 ? aws_iam_instance_profile.keepstore_instance_profile.name : aws_iam_instance_profile.default_instance_profile.name
diff --git a/tools/salt-install/terraform/aws/services/outputs.tf b/tools/salt-install/terraform/aws/services/outputs.tf
index 0c29420e8..9dbccf81c 100644
--- a/tools/salt-install/terraform/aws/services/outputs.tf
+++ b/tools/salt-install/terraform/aws/services/outputs.tf
@@ -11,10 +11,10 @@ output "vpc_cidr" {
}
output "arvados_subnet_id" {
- value = data.terraform_remote_state.vpc.outputs.arvados_subnet_id
+ value = data.terraform_remote_state.vpc.outputs.public_subnet_id
}
output "compute_subnet_id" {
- value = data.terraform_remote_state.vpc.outputs.compute_subnet_id
+ value = data.terraform_remote_state.vpc.outputs.private_subnet_id
}
output "arvados_sg_id" {
diff --git a/tools/salt-install/terraform/aws/vpc/locals.tf b/tools/salt-install/terraform/aws/vpc/locals.tf
index ed02fb85a..289eb3e04 100644
--- a/tools/salt-install/terraform/aws/vpc/locals.tf
+++ b/tools/salt-install/terraform/aws/vpc/locals.tf
@@ -16,8 +16,8 @@ locals {
private_ip = {
"controller": "10.1.1.11",
"workbench": "10.1.1.15",
- "shell": "10.1.1.17",
- "keep0": "10.1.1.13",
+ "shell": "10.1.2.17",
+ "keep0": "10.1.2.13",
}
aliases = {
controller: ["ws"]
diff --git a/tools/salt-install/terraform/aws/vpc/main.tf b/tools/salt-install/terraform/aws/vpc/main.tf
index 94d245c3d..eba48b9f9 100644
--- a/tools/salt-install/terraform/aws/vpc/main.tf
+++ b/tools/salt-install/terraform/aws/vpc/main.tf
@@ -24,12 +24,12 @@ resource "aws_vpc" "arvados_vpc" {
enable_dns_hostnames = true
enable_dns_support = true
}
-resource "aws_subnet" "arvados_subnet" {
+resource "aws_subnet" "public_subnet" {
vpc_id = aws_vpc.arvados_vpc.id
availability_zone = local.availability_zone
cidr_block = "10.1.1.0/24"
}
-resource "aws_subnet" "compute_subnet" {
+resource "aws_subnet" "private_subnet" {
vpc_id = aws_vpc.arvados_vpc.id
availability_zone = local.availability_zone
cidr_block = "10.1.2.0/24"
@@ -42,62 +42,58 @@ resource "aws_vpc_endpoint" "s3" {
vpc_id = aws_vpc.arvados_vpc.id
service_name = "com.amazonaws.${var.region_name}.s3"
}
-resource "aws_vpc_endpoint_route_table_association" "arvados_s3_route" {
- vpc_endpoint_id = aws_vpc_endpoint.s3.id
- route_table_id = aws_route_table.arvados_subnet_rt.id
-}
resource "aws_vpc_endpoint_route_table_association" "compute_s3_route" {
vpc_endpoint_id = aws_vpc_endpoint.s3.id
- route_table_id = aws_route_table.compute_subnet_rt.id
+ route_table_id = aws_route_table.private_subnet_rt.id
}
#
# Internet access for Public IP instances
#
-resource "aws_internet_gateway" "arvados_gw" {
+resource "aws_internet_gateway" "internet_gw" {
vpc_id = aws_vpc.arvados_vpc.id
}
resource "aws_eip" "arvados_eip" {
for_each = toset(local.public_hosts)
depends_on = [
- aws_internet_gateway.arvados_gw
+ aws_internet_gateway.internet_gw
]
}
-resource "aws_route_table" "arvados_subnet_rt" {
+resource "aws_route_table" "public_subnet_rt" {
vpc_id = aws_vpc.arvados_vpc.id
route {
cidr_block = "0.0.0.0/0"
- gateway_id = aws_internet_gateway.arvados_gw.id
+ gateway_id = aws_internet_gateway.internet_gw.id
}
}
-resource "aws_route_table_association" "arvados_subnet_assoc" {
- subnet_id = aws_subnet.arvados_subnet.id
- route_table_id = aws_route_table.arvados_subnet_rt.id
+resource "aws_route_table_association" "public_subnet_assoc" {
+ subnet_id = aws_subnet.public_subnet.id
+ route_table_id = aws_route_table.public_subnet_rt.id
}
#
# Internet access for Private IP instances
#
-resource "aws_eip" "compute_nat_gw_eip" {
+resource "aws_eip" "nat_gw_eip" {
depends_on = [
- aws_internet_gateway.arvados_gw
+ aws_internet_gateway.internet_gw
]
}
-resource "aws_nat_gateway" "compute_nat_gw" {
+resource "aws_nat_gateway" "nat_gw" {
# A NAT gateway should be placed on a subnet with an internet gateway
- subnet_id = aws_subnet.arvados_subnet.id
- allocation_id = aws_eip.compute_nat_gw_eip.id
+ subnet_id = aws_subnet.public_subnet.id
+ allocation_id = aws_eip.nat_gw_eip.id
}
-resource "aws_route_table" "compute_subnet_rt" {
+resource "aws_route_table" "private_subnet_rt" {
vpc_id = aws_vpc.arvados_vpc.id
route {
cidr_block = "0.0.0.0/0"
- nat_gateway_id = aws_nat_gateway.compute_nat_gw.id
+ nat_gateway_id = aws_nat_gateway.nat_gw.id
}
}
-resource "aws_route_table_association" "compute_subnet_assoc" {
- subnet_id = aws_subnet.compute_subnet.id
- route_table_id = aws_route_table.compute_subnet_rt.id
+resource "aws_route_table_association" "private_subnet_assoc" {
+ subnet_id = aws_subnet.private_subnet.id
+ route_table_id = aws_route_table.private_subnet_rt.id
}
resource "aws_security_group" "arvados_sg" {
diff --git a/tools/salt-install/terraform/aws/vpc/outputs.tf b/tools/salt-install/terraform/aws/vpc/outputs.tf
index 9fe16358b..09faa04a2 100644
--- a/tools/salt-install/terraform/aws/vpc/outputs.tf
+++ b/tools/salt-install/terraform/aws/vpc/outputs.tf
@@ -9,12 +9,12 @@ output "arvados_vpc_cidr" {
value = aws_vpc.arvados_vpc.cidr_block
}
-output "arvados_subnet_id" {
- value = aws_subnet.arvados_subnet.id
+output "public_subnet_id" {
+ value = aws_subnet.public_subnet.id
}
-output "compute_subnet_id" {
- value = aws_subnet.compute_subnet.id
+output "private_subnet_id" {
+ value = aws_subnet.private_subnet.id
}
output "arvados_sg_id" {
commit 9d031e1c35662393daf7611a8fc81f3c3c22623c
Author: Lucas Di Pentima <lucas.dipentima@curii.com>
Date: Mon Mar 27 14:43:31 2023 -0300
20270: Copies .gitignore file to avoid adding binaries to the repository.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima@curii.com>
diff --git a/tools/salt-install/installer.sh b/tools/salt-install/installer.sh
index 21f36faac..e06c0237a 100755
--- a/tools/salt-install/installer.sh
+++ b/tools/salt-install/installer.sh
@@ -181,11 +181,16 @@ case "$subcmd" in
if [[ -n "$TERRAFORM" ]] ; then
mkdir $SETUPDIR/terraform
cp -r $TERRAFORM/* $SETUPDIR/terraform/
+ cp $TERRAFORM/.gitignore $SETUPDIR/terraform/
fi
cd $SETUPDIR
echo '*.log' > .gitignore
+ if [[ -n "$TERRAFORM" ]] ; then
+ git add terraform
+ fi
+
git add *.sh ${CONFIG_FILE} ${CONFIG_DIR} tests .gitignore
git commit -m"initial commit"
commit 47592afcdea474cd6c8900544a57e86292e12e59
Author: Lucas Di Pentima <lucas.dipentima@curii.com>
Date: Mon Mar 27 14:15:02 2023 -0300
20270: Splits hosts into public & private, saving on public IP requirements.
Also, asks for fewer instances: there's no need for multiple keepstore
nodes, and keep-web can be run on the same node as workbench.
This makes the basic default cluster go from 6 to 4 nodes.
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima@curii.com>
diff --git a/tools/salt-install/terraform/aws/services/locals.tf b/tools/salt-install/terraform/aws/services/locals.tf
index 6a81967cf..523954ce3 100644
--- a/tools/salt-install/terraform/aws/services/locals.tf
+++ b/tools/salt-install/terraform/aws/services/locals.tf
@@ -10,6 +10,7 @@ locals {
private_ip = data.terraform_remote_state.vpc.outputs.private_ip
pubkey_path = pathexpand(var.pubkey_path)
pubkey_name = "arvados-deployer-key"
- hostnames = [ for hostname, eip_id in data.terraform_remote_state.vpc.outputs.eip_id: hostname ]
+ public_hosts = data.terraform_remote_state.vpc.outputs.public_hosts
+ private_hosts = data.terraform_remote_state.vpc.outputs.private_hosts
ssl_password_secret_name = "${local.cluster_name}-${var.ssl_password_secret_name_suffix}"
}
diff --git a/tools/salt-install/terraform/aws/services/main.tf b/tools/salt-install/terraform/aws/services/main.tf
index 9c27b9726..457aabc31 100644
--- a/tools/salt-install/terraform/aws/services/main.tf
+++ b/tools/salt-install/terraform/aws/services/main.tf
@@ -44,7 +44,7 @@ resource "aws_iam_instance_profile" "default_instance_profile" {
}
resource "aws_instance" "arvados_service" {
- for_each = toset(local.hostnames)
+ for_each = toset(concat(local.public_hosts, local.private_hosts))
ami = data.aws_ami.debian-11.image_id
instance_type = var.default_instance_type
key_name = local.pubkey_name
@@ -107,7 +107,7 @@ resource "aws_iam_policy_attachment" "cloud_dispatcher_ec2_access_attachment" {
}
resource "aws_eip_association" "eip_assoc" {
- for_each = toset(local.hostnames)
+ for_each = toset(local.public_hosts)
instance_id = aws_instance.arvados_service[each.value].id
allocation_id = data.terraform_remote_state.vpc.outputs.eip_id[each.value]
}
diff --git a/tools/salt-install/terraform/aws/vpc/locals.tf b/tools/salt-install/terraform/aws/vpc/locals.tf
index 8338aec7c..ed02fb85a 100644
--- a/tools/salt-install/terraform/aws/vpc/locals.tf
+++ b/tools/salt-install/terraform/aws/vpc/locals.tf
@@ -9,21 +9,19 @@ locals {
ssh: "22",
}
availability_zone = data.aws_availability_zones.available.names[0]
- hostnames = [ "controller", "workbench", "keep0", "keep1", "keepproxy", "shell" ]
+ public_hosts = [ "controller", "workbench" ]
+ private_hosts = [ "keep0", "shell" ]
arvados_dns_zone = "${var.cluster_name}.${var.domain_name}"
public_ip = { for k, v in aws_eip.arvados_eip: k => v.public_ip }
private_ip = {
"controller": "10.1.1.11",
"workbench": "10.1.1.15",
- "keepproxy": "10.1.1.12",
"shell": "10.1.1.17",
"keep0": "10.1.1.13",
- "keep1": "10.1.1.14"
}
aliases = {
controller: ["ws"]
- workbench: ["workbench2", "webshell"]
- keepproxy: ["keep", "download", "*.collections"]
+ workbench: ["workbench2", "webshell", "keep", "download", "*.collections"]
}
cname_by_host = flatten([
for host, aliases in local.aliases : [
diff --git a/tools/salt-install/terraform/aws/vpc/main.tf b/tools/salt-install/terraform/aws/vpc/main.tf
index 6e2113924..94d245c3d 100644
--- a/tools/salt-install/terraform/aws/vpc/main.tf
+++ b/tools/salt-install/terraform/aws/vpc/main.tf
@@ -58,7 +58,7 @@ resource "aws_internet_gateway" "arvados_gw" {
vpc_id = aws_vpc.arvados_vpc.id
}
resource "aws_eip" "arvados_eip" {
- for_each = toset(local.hostnames)
+ for_each = toset(local.public_hosts)
depends_on = [
aws_internet_gateway.arvados_gw
]
diff --git a/tools/salt-install/terraform/aws/vpc/outputs.tf b/tools/salt-install/terraform/aws/vpc/outputs.tf
index dd58ca700..9fe16358b 100644
--- a/tools/salt-install/terraform/aws/vpc/outputs.tf
+++ b/tools/salt-install/terraform/aws/vpc/outputs.tf
@@ -29,10 +29,18 @@ output "public_ip" {
value = local.public_ip
}
+output "public_hosts" {
+ value = local.public_hosts
+}
+
output "private_ip" {
value = local.private_ip
}
+output "private_hosts" {
+ value = local.private_hosts
+}
+
output "route53_dns_ns" {
value = aws_route53_zone.public_zone.name_servers
}
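With the split in place, the new outputs make it easy to see which hosts get an EIP; for example, from the setup directory, using the same paths as installer.sh's terraform subcommand:

  (cd terraform/vpc && terraform output public_hosts)    # expected: controller, workbench
  (cd terraform/vpc && terraform output private_hosts)   # expected: keep0, shell
  (cd terraform/vpc && terraform output public_ip)       # EIPs exist only for the public hosts
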
-----------------------------------------------------------------------
hooks/post-receive