[arvados] updated: 2.7.1-28-g17c47b1807
git repository hosting
git at public.arvados.org
Tue Mar 19 14:21:49 UTC 2024
Summary of changes:
doc/user/cwl/cwl-extensions.html.textile.liquid | 6 +-
lib/cloud/ec2/ec2.go | 6 +
lib/cloud/ec2/ec2_test.go | 6 +
lib/config/config.default.yml | 65 ++++-
lib/controller/integration_test.go | 2 +-
sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml | 9 +-
sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml | 9 +-
sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml | 10 +-
sdk/cwl/arvados_cwl/arvcontainer.py | 9 +-
sdk/cwl/tests/arvados-tests.yml | 5 +
.../oom/{19975-oom.cwl => 19975-oom-mispelled.cwl} | 1 +
sdk/cwl/tests/oom/19975-oom.cwl | 2 +-
sdk/cwl/tests/oom/19975-oom3.cwl | 2 +-
.../client/api/client/BaseStandardApiClient.java | 4 +-
.../client/api/client/CollectionsApiClient.java | 14 +
.../client/api/client/KeepWebApiClient.java | 25 ++
.../client/api/model/CollectionReplaceFiles.java | 70 +++++
.../org/arvados/client/facade/ArvadosFacade.java | 16 ++
.../arvados/client/logic/keep/FileDownloader.java | 33 +++
.../api/client/CollectionsApiClientTest.java | 67 ++++-
.../client/api/client/KeepWebApiClientTest.java | 44 ++-
.../client/logic/keep/FileDownloaderTest.java | 43 ++-
sdk/python/arvados/commands/_util.py | 99 ++++++-
sdk/python/tests/test_cmd_util.py | 194 ++++++++++++++
services/api/app/models/user.rb | 20 +-
services/api/test/fixtures/collections.yml | 45 ++++
services/api/test/fixtures/groups.yml | 11 +
.../functional/arvados/v1/users_controller_test.rb | 31 +++
services/fuse/arvados_fuse/command.py | 24 +-
services/fuse/arvados_fuse/fusedir.py | 294 +++++++++++++++------
services/fuse/tests/mount_test_base.py | 19 +-
services/fuse/tests/test_mount.py | 5 +-
services/fuse/tests/test_mount_filters.py | 223 ++++++++++++++++
tools/compute-images/scripts/base.sh | 2 +-
...nsure-encrypted-partitions-aws-ebs-autoscale.sh | 2 +-
35 files changed, 1274 insertions(+), 143 deletions(-)
copy sdk/cwl/tests/oom/{19975-oom.cwl => 19975-oom-mispelled.cwl} (86%)
create mode 100644 sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java
create mode 100644 sdk/python/tests/test_cmd_util.py
create mode 100644 services/fuse/tests/test_mount_filters.py
via 17c47b18079a893ef46070956e43d5c5dfc1bf2d (commit)
via 0fd7af7f304412f82977d39cc37045babb6b401d (commit)
via 1b3afde7289e7f5dc3798d56adaff3bc0a552b2a (commit)
via c53754235bd90ff0db956fa12bde467f17d20928 (commit)
via 89d7d0839427c46f82c0df351456df811b4a9e27 (commit)
via aeafe22313edb1633e6f5ce14b883ea6f2962b34 (commit)
via 010c28c7cf3f7d63b6d89f039a72646c48bec4f3 (commit)
via 1bbd9a87f928257d44cf1a4a7f576cd49ab3e062 (commit)
from 5ec8ceec97c75ad8f9c9a850a5c248ebade33198 (commit)
Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.
commit 17c47b18079a893ef46070956e43d5c5dfc1bf2d
Author: Lucas Di Pentima <lucas.dipentima at curii.com>
Date: Mon Mar 4 15:15:33 2024 -0300
Merge branch '21552-ebs-autoscale-update'. Closes #21552
Arvados-DCO-1.1-Signed-off-by: Lucas Di Pentima <lucas.dipentima at curii.com>
diff --git a/tools/compute-images/scripts/base.sh b/tools/compute-images/scripts/base.sh
index a9323214ce..582ba4c2b0 100644
--- a/tools/compute-images/scripts/base.sh
+++ b/tools/compute-images/scripts/base.sh
@@ -158,7 +158,7 @@ else
unzip -q /tmp/awscliv2.zip -d /tmp && $SUDO /tmp/aws/install
# Pinned to v2.4.5 because we apply a patch below
#export EBS_AUTOSCALE_VERSION=$(curl --silent "https://api.github.com/repos/awslabs/amazon-ebs-autoscale/releases/latest" | jq -r .tag_name)
- export EBS_AUTOSCALE_VERSION="5ca6e24e05787b8ae1184c2a10db80053ddd3038"
+ export EBS_AUTOSCALE_VERSION="ee323f0751c2b6f733692e805b51b9bf3c251bac"
cd /opt && $SUDO git clone https://github.com/arvados/amazon-ebs-autoscale.git
cd /opt/amazon-ebs-autoscale && $SUDO git checkout $EBS_AUTOSCALE_VERSION
diff --git a/tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh b/tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh
index 6f0970b17f..d9790fb45c 100644
--- a/tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh
+++ b/tools/compute-images/scripts/usr-local-bin-ensure-encrypted-partitions-aws-ebs-autoscale.sh
@@ -36,7 +36,7 @@ fi
ensure_umount "$MOUNTPATH/docker/aufs"
-/bin/bash /opt/amazon-ebs-autoscale/install.sh -f lvm.ext4 -m $MOUNTPATH 2>&1 > /var/log/ebs-autoscale-install.log
+/bin/bash /opt/amazon-ebs-autoscale/install.sh --imdsv2 -f lvm.ext4 -m $MOUNTPATH 2>&1 > /var/log/ebs-autoscale-install.log
# Make sure docker uses the big partition
cat <<EOF > /etc/docker/daemon.json
commit 0fd7af7f304412f82977d39cc37045babb6b401d
Author: Tom Clegg <tom at curii.com>
Date: Wed Feb 28 15:48:42 2024 -0500
Merge branch '21552-force-imdsv2'
refs #21552
closes #21565
Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom at curii.com>
diff --git a/lib/cloud/ec2/ec2.go b/lib/cloud/ec2/ec2.go
index 07a146d99f..9a3f784b51 100644
--- a/lib/cloud/ec2/ec2.go
+++ b/lib/cloud/ec2/ec2.go
@@ -251,6 +251,12 @@ func (instanceSet *ec2InstanceSet) Create(
ResourceType: aws.String("instance"),
Tags: ec2tags,
}},
+ MetadataOptions: &ec2.InstanceMetadataOptionsRequest{
+ // Require IMDSv2, as described at
+ // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html
+ HttpEndpoint: aws.String(ec2.InstanceMetadataEndpointStateEnabled),
+ HttpTokens: aws.String(ec2.HttpTokensStateRequired),
+ },
UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
}
diff --git a/lib/cloud/ec2/ec2_test.go b/lib/cloud/ec2/ec2_test.go
index 4b83005896..d342f0fb30 100644
--- a/lib/cloud/ec2/ec2_test.go
+++ b/lib/cloud/ec2/ec2_test.go
@@ -277,6 +277,12 @@ func (*EC2InstanceSetSuite) TestCreate(c *check.C) {
if *live == "" {
c.Check(ap.client.(*ec2stub).describeKeyPairsCalls, check.HasLen, 1)
c.Check(ap.client.(*ec2stub).importKeyPairCalls, check.HasLen, 1)
+
+ runcalls := ap.client.(*ec2stub).runInstancesCalls
+ if c.Check(runcalls, check.HasLen, 1) {
+ c.Check(runcalls[0].MetadataOptions.HttpEndpoint, check.DeepEquals, aws.String("enabled"))
+ c.Check(runcalls[0].MetadataOptions.HttpTokens, check.DeepEquals, aws.String("required"))
+ }
}
}
commit 1b3afde7289e7f5dc3798d56adaff3bc0a552b2a
Author: Peter Amstutz <peter.amstutz at curii.com>
Date: Tue Feb 27 11:13:26 2024 -0500
Merge branch '21416-email-options-doc' refs #21416
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>
diff --git a/lib/config/config.default.yml b/lib/config/config.default.yml
index 9004b3f64f..8b434386ac 100644
--- a/lib/config/config.default.yml
+++ b/lib/config/config.default.yml
@@ -365,34 +365,59 @@ Clusters:
# false.
ActivatedUsersAreVisibleToOthers: true
- # The e-mail address of the user you would like to become marked as an admin
- # user on their first login.
+ # If a user creates an account with this email address, they
+ # will be automatically set to admin.
AutoAdminUserWithEmail: ""
# If AutoAdminFirstUser is set to true, the first user to log in when no
# other admin users exist will automatically become an admin user.
AutoAdminFirstUser: false
- # Email address to notify whenever a user creates a profile for the
- # first time
+ # Recipient for notification email sent out when a user sets a
+ # profile on their account.
UserProfileNotificationAddress: ""
+
+ # When sending a NewUser, NewInactiveUser, or UserProfile
+ # notification, this is the 'From' address to use
AdminNotifierEmailFrom: arvados at example.com
+
+ # Prefix for email subjects for NewUser and NewInactiveUser emails
EmailSubjectPrefix: "[ARVADOS] "
+
+ # When sending a welcome email to the user, the 'From' address to use
UserNotifierEmailFrom: arvados at example.com
- UserNotifierEmailBcc: {}
- NewUserNotificationRecipients: {}
- NewInactiveUserNotificationRecipients: {}
+
+ # The welcome email sent to new users will be blind copied to
+ # these addresses.
+ UserNotifierEmailBcc:
+ SAMPLE: {}
+
+ # Recipients for notification email sent out when a user account
+ # is created and already set up to be able to log in
+ NewUserNotificationRecipients:
+ SAMPLE: {}
+
+ # Recipients for notification email sent out when a user account
+ # has been created but the user cannot log in until they are
+ # set up by an admin.
+ NewInactiveUserNotificationRecipients:
+ SAMPLE: {}
# Set AnonymousUserToken to enable anonymous user access. Populate this
# field with a random string at least 50 characters long.
AnonymousUserToken: ""
- # If a new user has an alternate email address (local at domain)
- # with the domain given here, its local part becomes the new
- # user's default username. Otherwise, the user's primary email
- # address is used.
+ # The login provider for a user may supply a primary email
+ # address and one or more alternate email addresses. If a new
+ # user has an alternate email address with the domain given
+ # here, use the username from the alternate email to generate
+ # the user's Arvados username. Otherwise, the username from
+ # user's primary email address is used for the Arvados username.
+ # Currently implemented for OpenID Connect only.
PreferDomainForUsername: ""
+ # Ruby ERB template used for the email sent out to users when
+ # they have been set up.
UserSetupMailText: |
<% if not @user.full_name.empty? -%>
<%= @user.full_name %>,
@@ -1798,8 +1823,18 @@ Clusters:
Serialize: false
Mail:
- MailchimpAPIKey: ""
- MailchimpListID: ""
+ # In order to send mail, Arvados expects a default SMTP server
+ # on localhost:25. It cannot require authentication on
+ # connections from localhost. That server should be configured
+ # to relay mail to a "real" SMTP server that is able to send
+ # email on behalf of your domain.
+
+ # See also the "Users" configuration section for additional
+ # email-related options.
+
+ # When a user has been set up (meaning they are able to log in)
+ # they will receive an email using the template specified
+ # earlier in Users.UserSetupMailText
SendUserSetupNotificationEmail: true
# Bug/issue report notification to and from addresses
@@ -1809,6 +1844,10 @@ Clusters:
# Generic issue email from
EmailFrom: "arvados at example.com"
+
+ # No longer supported, to be removed.
+ MailchimpAPIKey: ""
+ MailchimpListID: ""
RemoteClusters:
"*":
Host: ""
commit c53754235bd90ff0db956fa12bde467f17d20928
Author: Brett Smith <brett.smith at curii.com>
Date: Tue Feb 13 12:26:35 2024 -0500
Merge branch '21452-fuse-filters'
Closes #21452.
Arvados-DCO-1.1-Signed-off-by: Brett Smith <brett.smith at curii.com>
diff --git a/sdk/python/arvados/commands/_util.py b/sdk/python/arvados/commands/_util.py
index 17454b7d17..6c792b2e0d 100644
--- a/sdk/python/arvados/commands/_util.py
+++ b/sdk/python/arvados/commands/_util.py
@@ -4,12 +4,21 @@
import argparse
import errno
-import os
+import json
import logging
+import os
+import re
import signal
-from future.utils import listitems, listvalues
import sys
+FILTER_STR_RE = re.compile(r'''
+^\(
+\ *(\w+)
+\ *(<|<=|=|>=|>)
+\ *(\w+)
+\ *\)$
+''', re.ASCII | re.VERBOSE)
+
def _pos_int(s):
num = int(s)
if num < 0:
@@ -61,5 +70,89 @@ def install_signal_handlers():
for sigcode in CAUGHT_SIGNALS}
def restore_signal_handlers():
- for sigcode, orig_handler in listitems(orig_signal_handlers):
+ for sigcode, orig_handler in orig_signal_handlers.items():
signal.signal(sigcode, orig_handler)
+
+def validate_filters(filters):
+ """Validate user-provided filters
+
+ This function validates that a user-defined object represents valid
+ Arvados filters that can be passed to an API client: that it's a list of
+ 3-element lists with the field name and operator given as strings. If any
+ of these conditions are not true, it raises a ValueError with details about
+ the problem.
+
+ It returns validated filters. Currently the provided filters are returned
+ unmodified. Future versions of this function may clean up the filters with
+ "obvious" type conversions, so callers SHOULD use the returned value for
+ Arvados API calls.
+ """
+ if not isinstance(filters, list):
+ raise ValueError(f"filters are not a list: {filters!r}")
+ for index, f in enumerate(filters):
+ if isinstance(f, str):
+ match = FILTER_STR_RE.fullmatch(f)
+ if match is None:
+ raise ValueError(f"filter at index {index} has invalid syntax: {f!r}")
+ s, op, o = match.groups()
+ if s[0].isdigit():
+ raise ValueError(f"filter at index {index} has invalid syntax: bad field name {s!r}")
+ if o[0].isdigit():
+ raise ValueError(f"filter at index {index} has invalid syntax: bad field name {o!r}")
+ continue
+ elif not isinstance(f, list):
+ raise ValueError(f"filter at index {index} is not a string or list: {f!r}")
+ try:
+ s, op, o = f
+ except ValueError:
+ raise ValueError(
+ f"filter at index {index} does not have three items (field name, operator, operand): {f!r}",
+ ) from None
+ if not isinstance(s, str):
+ raise ValueError(f"filter at index {index} field name is not a string: {s!r}")
+ if not isinstance(op, str):
+ raise ValueError(f"filter at index {index} operator is not a string: {op!r}")
+ return filters
+
+
+class JSONArgument:
+ """Parse a JSON file from a command line argument string or path
+
+ JSONArgument objects can be called with a string and return an arbitrary
+ object. First it will try to decode the string as JSON. If that fails, it
+ will try to open a file at the path named by the string, and decode it as
+ JSON. If that fails, it raises ValueError with more detail.
+
+ This is designed to be used as an argparse argument type.
+ Typical usage looks like:
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--object', type=JSONArgument(), ...)
+
+ You can construct JSONArgument with an optional validation function. If
+ given, it is called with the object decoded from user input, and its
+ return value replaces it. It should raise ValueError if there is a problem
+ with the input. (argparse turns ValueError into a useful error message.)
+
+ filters_type = JSONArgument(validate_filters)
+ parser.add_argument('--filters', type=filters_type, ...)
+ """
+ def __init__(self, validator=None):
+ self.validator = validator
+
+ def __call__(self, value):
+ try:
+ retval = json.loads(value)
+ except json.JSONDecodeError:
+ try:
+ with open(value, 'rb') as json_file:
+ retval = json.load(json_file)
+ except json.JSONDecodeError as error:
+ raise ValueError(f"error decoding JSON from file {value!r}: {error}") from None
+ except (FileNotFoundError, ValueError):
+ raise ValueError(f"not a valid JSON string or file path: {value!r}") from None
+ except OSError as error:
+ raise ValueError(f"error reading JSON file path {value!r}: {error.strerror}") from None
+ if self.validator is not None:
+ retval = self.validator(retval)
+ return retval
diff --git a/sdk/python/tests/test_cmd_util.py b/sdk/python/tests/test_cmd_util.py
new file mode 100644
index 0000000000..ffd45aa4b7
--- /dev/null
+++ b/sdk/python/tests/test_cmd_util.py
@@ -0,0 +1,194 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import contextlib
+import copy
+import itertools
+import json
+import os
+import tempfile
+import unittest
+
+from pathlib import Path
+
+from parameterized import parameterized
+
+import arvados.commands._util as cmd_util
+
+FILE_PATH = Path(__file__)
+
+class ValidateFiltersTestCase(unittest.TestCase):
+ NON_FIELD_TYPES = [
+ None,
+ 123,
+ ('name', '=', 'tuple'),
+ {'filters': ['name', '=', 'object']},
+ ]
+ NON_FILTER_TYPES = NON_FIELD_TYPES + ['string']
+ VALID_FILTERS = [
+ ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890'],
+ ['name', 'in', ['foo', 'bar']],
+ '(replication_desired > replication_cofirmed)',
+ '(replication_confirmed>=replication_desired)',
+ ]
+
+ @parameterized.expand(itertools.combinations(VALID_FILTERS, 2))
+ def test_valid_filters(self, f1, f2):
+ expected = [f1, f2]
+ actual = cmd_util.validate_filters(copy.deepcopy(expected))
+ self.assertEqual(actual, expected)
+
+ @parameterized.expand([(t,) for t in NON_FILTER_TYPES])
+ def test_filters_wrong_type(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filters are not a list\b'):
+ cmd_util.validate_filters(value)
+
+ @parameterized.expand([(t,) for t in NON_FIELD_TYPES])
+ def test_single_filter_wrong_type(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filter at index 0 is not a string or list\b'):
+ cmd_util.validate_filters([value])
+
+ @parameterized.expand([
+ ([],),
+ (['owner_uuid'],),
+ (['owner_uuid', 'zzzzz-tpzed-12345abcde67890'],),
+ (['name', 'not in', 'foo', 'bar'],),
+ (['name', 'in', 'foo', 'bar', 'baz'],),
+ ])
+ def test_filters_wrong_arity(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filter at index 0 does not have three items\b'):
+ cmd_util.validate_filters([value])
+
+ @parameterized.expand(itertools.product(
+ [0, 1],
+ NON_FIELD_TYPES,
+ ))
+ def test_filter_definition_wrong_type(self, index, bad_value):
+ value = ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890']
+ value[index] = bad_value
+ name = ('field name', 'operator')[index]
+ with self.assertRaisesRegex(ValueError, rf'^filter at index 0 {name} is not a string\b'):
+ cmd_util.validate_filters([value])
+
+ @parameterized.expand([
+ # Not enclosed in parentheses
+ 'foo = bar',
+ '(foo) < bar',
+ 'foo > (bar)',
+ # Not exactly one operator
+ '(a >= b >= c)',
+ '(foo)',
+ '(file_count version)',
+ # Invalid field identifiers
+ '(version = 1)',
+ '(2 = file_count)',
+ '(replication.desired <= replication.confirmed)',
+ # Invalid whitespace
+ '(file_count\t=\tversion)',
+ '(file_count >= version\n)',
+ ])
+ def test_invalid_string_filter(self, value):
+ with self.assertRaisesRegex(ValueError, r'^filter at index 0 has invalid syntax\b'):
+ cmd_util.validate_filters([value])
+
+
+class JSONArgumentTestCase(unittest.TestCase):
+ JSON_OBJECTS = [
+ None,
+ 123,
+ 456.789,
+ 'string',
+ ['list', 1],
+ {'object': True, 'yaml': False},
+ ]
+
+ @classmethod
+ def setUpClass(cls):
+ cls.json_file = tempfile.NamedTemporaryFile(
+ 'w+',
+ encoding='utf-8',
+ prefix='argtest',
+ suffix='.json',
+ )
+ cls.parser = cmd_util.JSONArgument()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.json_file.close()
+
+ def setUp(self):
+ self.json_file.seek(0)
+ self.json_file.truncate()
+
+ @parameterized.expand((obj,) for obj in JSON_OBJECTS)
+ def test_valid_argument_string(self, obj):
+ actual = self.parser(json.dumps(obj))
+ self.assertEqual(actual, obj)
+
+ @parameterized.expand((obj,) for obj in JSON_OBJECTS)
+ def test_valid_argument_path(self, obj):
+ json.dump(obj, self.json_file)
+ self.json_file.flush()
+ actual = self.parser(self.json_file.name)
+ self.assertEqual(actual, obj)
+
+ @parameterized.expand([
+ '',
+ '\0',
+ None,
+ ])
+ def test_argument_not_json_or_path(self, value):
+ if value is None:
+ with tempfile.NamedTemporaryFile() as gone_file:
+ value = gone_file.name
+ with self.assertRaisesRegex(ValueError, r'\bnot a valid JSON string or file path\b'):
+ self.parser(value)
+
+ @parameterized.expand([
+ FILE_PATH.parent,
+ FILE_PATH / 'nonexistent.json',
+ None,
+ ])
+ def test_argument_path_unreadable(self, path):
+ if path is None:
+ bad_file = tempfile.NamedTemporaryFile()
+ os.chmod(bad_file.fileno(), 0o000)
+ path = bad_file.name
+ @contextlib.contextmanager
+ def ctx():
+ try:
+ yield
+ finally:
+ os.chmod(bad_file.fileno(), 0o600)
+ else:
+ ctx = contextlib.nullcontext
+ with self.assertRaisesRegex(ValueError, rf'^error reading JSON file path {str(path)!r}: '), ctx():
+ self.parser(str(path))
+
+ @parameterized.expand([
+ FILE_PATH,
+ None,
+ ])
+ def test_argument_path_not_json(self, path):
+ if path is None:
+ path = self.json_file.name
+ with self.assertRaisesRegex(ValueError, rf'^error decoding JSON from file {str(path)!r}'):
+ self.parser(str(path))
+
+
+class JSONArgumentValidationTestCase(unittest.TestCase):
+ @parameterized.expand((obj,) for obj in JSONArgumentTestCase.JSON_OBJECTS)
+ def test_object_returned_from_validator(self, value):
+ parser = cmd_util.JSONArgument(lambda _: copy.deepcopy(value))
+ self.assertEqual(parser('{}'), value)
+
+ @parameterized.expand((obj,) for obj in JSONArgumentTestCase.JSON_OBJECTS)
+ def test_exception_raised_from_validator(self, value):
+ json_value = json.dumps(value)
+ def raise_func(_):
+ raise ValueError(json_value)
+ parser = cmd_util.JSONArgument(raise_func)
+ with self.assertRaises(ValueError) as exc_check:
+ parser(json_value)
+ self.assertEqual(exc_check.exception.args, (json_value,))
diff --git a/services/api/test/fixtures/collections.yml b/services/api/test/fixtures/collections.yml
index a5c3e63dde..b3b01935c6 100644
--- a/services/api/test/fixtures/collections.yml
+++ b/services/api/test/fixtures/collections.yml
@@ -220,6 +220,51 @@ foo_collection_in_aproject:
manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
name: "zzzzz-4zz18-fy296fx3hot09f7 added sometime"
+fuse_filters_test_foo:
+ uuid: zzzzz-4zz18-4e2kjqv891jl3p3
+ current_version_uuid: zzzzz-4zz18-4e2kjqv891jl3p3
+ portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-000000000000000
+ owner_uuid: zzzzz-tpzed-fusefiltertest1
+ created_at: 2024-02-09T12:01:00Z
+ modified_at: 2024-02-09T12:01:01Z
+ updated_at: 2024-02-09T12:01:01Z
+ manifest_text: ". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\n"
+ name: foo
+ properties:
+ MainFile: foo
+
+fuse_filters_test_bar:
+ uuid: zzzzz-4zz18-qpxqtq2wbjnu630
+ current_version_uuid: zzzzz-4zz18-qpxqtq2wbjnu630
+ portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ owner_uuid: zzzzz-tpzed-fusefiltertest1
+ created_at: 2024-02-09T12:02:00Z
+ modified_at: 2024-02-09T12:02:01Z
+ updated_at: 2024-02-09T12:02:01Z
+ manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+ name: bar
+ properties:
+ MainFile: bar
+
+fuse_filters_test_baz:
+ uuid: zzzzz-4zz18-ls97ezovrkkpfxz
+ current_version_uuid: zzzzz-4zz18-ls97ezovrkkpfxz
+ portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f
+ owner_uuid: zzzzz-tpzed-fusefiltertest1
+ created_at: 2024-02-09T12:03:00Z
+ modified_at: 2024-02-09T12:03:01Z
+ updated_at: 2024-02-09T12:03:01Z
+ manifest_text: ". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\n"
+ name: baz
+ properties:
+ MainFile: baz
+
user_agreement_in_anonymously_accessible_project:
uuid: zzzzz-4zz18-uukreo9rbgwsujr
current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujr
diff --git a/services/api/test/fixtures/groups.yml b/services/api/test/fixtures/groups.yml
index 9a2dc169b6..9034ac6ee7 100644
--- a/services/api/test/fixtures/groups.yml
+++ b/services/api/test/fixtures/groups.yml
@@ -172,6 +172,17 @@ afiltergroup5:
properties:
filters: [["collections.properties.listprop","contains","elem1"],["uuid", "is_a", "arvados#collection"]]
+fuse_filters_test_project:
+ uuid: zzzzz-j7d0g-fusefiltertest1
+ owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ modified_by_client_uuid: zzzzz-ozdt8-brczlopd8u8d0jr
+ modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz
+ created_at: 2024-02-09T12:00:00Z
+ modified_at: 2024-02-09T12:00:01Z
+ updated_at: 2024-02-09T12:00:01Z
+ name: FUSE Filters Test Project 1
+ group_class: project
+
future_project_viewing_group:
uuid: zzzzz-j7d0g-futrprojviewgrp
owner_uuid: zzzzz-tpzed-000000000000000
diff --git a/services/fuse/arvados_fuse/command.py b/services/fuse/arvados_fuse/command.py
index 9c607c7f0c..610da477ca 100644
--- a/services/fuse/arvados_fuse/command.py
+++ b/services/fuse/arvados_fuse/command.py
@@ -117,7 +117,13 @@ class ArgumentParser(argparse.ArgumentParser):
self.add_argument('--unmount-timeout',
type=float, default=2.0,
help="Time to wait for graceful shutdown after --exec program exits and filesystem is unmounted")
-
+ self.add_argument(
+ '--filters',
+ type=arv_cmd.JSONArgument(arv_cmd.validate_filters),
+ help="""Filters to apply to all project, shared, and tag directory
+contents. Pass filters as either a JSON string or a path to a JSON file.
+The JSON object should be a list of filters in Arvados API list filter syntax.
+""")
self.add_argument('--exec', type=str, nargs=argparse.REMAINDER,
dest="exec_args", metavar=('command', 'args', '...', '--'),
help="""Mount, run a command, then unmount and exit""")
@@ -300,7 +306,14 @@ class Mount(object):
usr = self.api.users().current().execute(num_retries=self.args.retries)
now = time.time()
dir_class = None
- dir_args = [llfuse.ROOT_INODE, self.operations.inodes, self.api, self.args.retries, self.args.enable_write]
+ dir_args = [
+ llfuse.ROOT_INODE,
+ self.operations.inodes,
+ self.api,
+ self.args.retries,
+ self.args.enable_write,
+ self.args.filters,
+ ]
mount_readme = False
storage_classes = None
@@ -366,7 +379,12 @@ class Mount(object):
return
e = self.operations.inodes.add_entry(Directory(
- llfuse.ROOT_INODE, self.operations.inodes, self.api.config, self.args.enable_write))
+ llfuse.ROOT_INODE,
+ self.operations.inodes,
+ self.api.config,
+ self.args.enable_write,
+ self.args.filters,
+ ))
dir_args[0] = e.inode
for name in self.args.mount_by_id:
diff --git a/services/fuse/arvados_fuse/fusedir.py b/services/fuse/arvados_fuse/fusedir.py
index 8faf01cb6c..e3b8dd4c2c 100644
--- a/services/fuse/arvados_fuse/fusedir.py
+++ b/services/fuse/arvados_fuse/fusedir.py
@@ -36,7 +36,7 @@ class Directory(FreshBase):
and the value referencing a File or Directory object.
"""
- def __init__(self, parent_inode, inodes, apiconfig, enable_write):
+ def __init__(self, parent_inode, inodes, apiconfig, enable_write, filters):
"""parent_inode is the integer inode number"""
super(Directory, self).__init__()
@@ -50,6 +50,19 @@ class Directory(FreshBase):
self._entries = {}
self._mtime = time.time()
self._enable_write = enable_write
+ self._filters = filters or []
+
+ def _filters_for(self, subtype, *, qualified):
+ for f in self._filters:
+ f_type, _, f_name = f[0].partition('.')
+ if not f_name:
+ yield f
+ elif f_type != subtype:
+ pass
+ elif qualified:
+ yield f
+ else:
+ yield [f_name, *f[1:]]
def forward_slash_subst(self):
if not hasattr(self, '_fsns'):
@@ -270,8 +283,8 @@ class CollectionDirectoryBase(Directory):
"""
- def __init__(self, parent_inode, inodes, apiconfig, enable_write, collection, collection_root):
- super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write)
+ def __init__(self, parent_inode, inodes, apiconfig, enable_write, filters, collection, collection_root):
+ super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, apiconfig, enable_write, filters)
self.apiconfig = apiconfig
self.collection = collection
self.collection_root = collection_root
@@ -287,7 +300,15 @@ class CollectionDirectoryBase(Directory):
item.fuse_entry.dead = False
self._entries[name] = item.fuse_entry
elif isinstance(item, arvados.collection.RichCollectionBase):
- self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(self.inode, self.inodes, self.apiconfig, self._enable_write, item, self.collection_root))
+ self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(
+ self.inode,
+ self.inodes,
+ self.apiconfig,
+ self._enable_write,
+ self._filters,
+ item,
+ self.collection_root,
+ ))
self._entries[name].populate(mtime)
else:
self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime, self._enable_write))
@@ -434,8 +455,8 @@ class CollectionDirectoryBase(Directory):
class CollectionDirectory(CollectionDirectoryBase):
"""Represents the root of a directory tree representing a collection."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, collection_record=None, explicit_collection=None):
- super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, None, self)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters=None, collection_record=None, explicit_collection=None):
+ super(CollectionDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters, None, self)
self.api = api
self.num_retries = num_retries
self._poll = True
@@ -637,7 +658,7 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
def save_new(self):
pass
- def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, storage_classes=None):
+ def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, filters=None, storage_classes=None):
collection = self.UnsaveableCollection(
api_client=api_client,
keep_client=api_client.keep,
@@ -646,7 +667,7 @@ class TmpCollectionDirectory(CollectionDirectoryBase):
# This is always enable_write=True because it never tries to
# save to the backend
super(TmpCollectionDirectory, self).__init__(
- parent_inode, inodes, api_client.config, True, collection, self)
+ parent_inode, inodes, api_client.config, True, filters, collection, self)
self.populate(self.mtime())
def on_event(self, *args, **kwargs):
@@ -742,8 +763,8 @@ and the directory will appear if it exists.
""".lstrip()
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, pdh_only=False, storage_classes=None):
- super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, pdh_only=False, storage_classes=None):
+ super(MagicDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.pdh_only = pdh_only
@@ -759,8 +780,14 @@ and the directory will appear if it exists.
# If we're the root directory, add an identical by_id subdirectory.
if self.inode == llfuse.ROOT_INODE:
self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(
- self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- self.pdh_only))
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ self.pdh_only,
+ ))
def __contains__(self, k):
if k in self._entries:
@@ -774,15 +801,34 @@ and the directory will appear if it exists.
if group_uuid_pattern.match(k):
project = self.api.groups().list(
- filters=[['group_class', 'in', ['project','filter']], ["uuid", "=", k]]).execute(num_retries=self.num_retries)
+ filters=[
+ ['group_class', 'in', ['project','filter']],
+ ["uuid", "=", k],
+ *self._filters_for('groups', qualified=False),
+ ],
+ ).execute(num_retries=self.num_retries)
if project[u'items_available'] == 0:
return False
e = self.inodes.add_entry(ProjectDirectory(
- self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- project[u'items'][0], storage_classes=self.storage_classes))
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ project[u'items'][0],
+ storage_classes=self.storage_classes,
+ ))
else:
e = self.inodes.add_entry(CollectionDirectory(
- self.inode, self.inodes, self.api, self.num_retries, self._enable_write, k))
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ k,
+ ))
if e.update():
if k not in self._entries:
@@ -816,8 +862,8 @@ and the directory will appear if it exists.
class TagsDirectory(Directory):
"""A special directory that contains as subdirectories all tags visible to the user."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, poll_time=60):
- super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, poll_time=60):
+ super(TagsDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
self.api = api
self.num_retries = num_retries
self._poll = True
@@ -831,15 +877,32 @@ class TagsDirectory(Directory):
def update(self):
with llfuse.lock_released:
tags = self.api.links().list(
- filters=[['link_class', '=', 'tag'], ["name", "!=", ""]],
- select=['name'], distinct=True, limit=1000
- ).execute(num_retries=self.num_retries)
+ filters=[
+ ['link_class', '=', 'tag'],
+ ['name', '!=', ''],
+ *self._filters_for('links', qualified=False),
+ ],
+ select=['name'],
+ distinct=True,
+ limit=1000,
+ ).execute(num_retries=self.num_retries)
if "items" in tags:
- self.merge(tags['items']+[{"name": n} for n in self._extra],
- lambda i: i['name'],
- lambda a, i: a.tag == i['name'],
- lambda i: TagDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- i['name'], poll=self._poll, poll_time=self._poll_time))
+ self.merge(
+ tags['items']+[{"name": n} for n in self._extra],
+ lambda i: i['name'],
+ lambda a, i: a.tag == i['name'],
+ lambda i: TagDirectory(
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ i['name'],
+ poll=self._poll,
+ poll_time=self._poll_time,
+ ),
+ )
@use_counter
@check_update
@@ -848,7 +911,12 @@ class TagsDirectory(Directory):
return super(TagsDirectory, self).__getitem__(item)
with llfuse.lock_released:
tags = self.api.links().list(
- filters=[['link_class', '=', 'tag'], ['name', '=', item]], limit=1
+ filters=[
+ ['link_class', '=', 'tag'],
+ ['name', '=', item],
+ *self._filters_for('links', qualified=False),
+ ],
+ limit=1,
).execute(num_retries=self.num_retries)
if tags["items"]:
self._extra.add(item)
@@ -873,9 +941,9 @@ class TagDirectory(Directory):
to the user that are tagged with a particular tag.
"""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, tag,
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, tag,
poll=False, poll_time=60):
- super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ super(TagDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.tag = tag
@@ -889,23 +957,36 @@ class TagDirectory(Directory):
def update(self):
with llfuse.lock_released:
taggedcollections = self.api.links().list(
- filters=[['link_class', '=', 'tag'],
- ['name', '=', self.tag],
- ['head_uuid', 'is_a', 'arvados#collection']],
- select=['head_uuid']
- ).execute(num_retries=self.num_retries)
- self.merge(taggedcollections['items'],
- lambda i: i['head_uuid'],
- lambda a, i: a.collection_locator == i['head_uuid'],
- lambda i: CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid']))
+ filters=[
+ ['link_class', '=', 'tag'],
+ ['name', '=', self.tag],
+ ['head_uuid', 'is_a', 'arvados#collection'],
+ *self._filters_for('links', qualified=False),
+ ],
+ select=['head_uuid'],
+ ).execute(num_retries=self.num_retries)
+ self.merge(
+ taggedcollections['items'],
+ lambda i: i['head_uuid'],
+ lambda a, i: a.collection_locator == i['head_uuid'],
+ lambda i: CollectionDirectory(
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ i['head_uuid'],
+ ),
+ )
class ProjectDirectory(Directory):
"""A special directory that contains the contents of a project."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, project_object,
- poll=True, poll_time=3, storage_classes=None):
- super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+ project_object, poll=True, poll_time=3, storage_classes=None):
+ super(ProjectDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.project_object = project_object
@@ -922,14 +1003,14 @@ class ProjectDirectory(Directory):
return True
def createDirectory(self, i):
+ common_args = (self.inode, self.inodes, self.api, self.num_retries, self._enable_write, self._filters)
if collection_uuid_pattern.match(i['uuid']):
- return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i)
+ return CollectionDirectory(*common_args, i)
elif group_uuid_pattern.match(i['uuid']):
- return ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- i, self._poll, self._poll_time, self.storage_classes)
+ return ProjectDirectory(*common_args, i, self._poll, self._poll_time, self.storage_classes)
elif link_uuid_pattern.match(i['uuid']):
if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):
- return CollectionDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write, i['head_uuid'])
+ return CollectionDirectory(*common_args, i['head_uuid'])
else:
return None
elif uuid_pattern.match(i['uuid']):
@@ -990,19 +1071,27 @@ class ProjectDirectory(Directory):
self.project_object = self.api.users().get(
uuid=self.project_uuid).execute(num_retries=self.num_retries)
# do this in 2 steps until #17424 is fixed
- contents = list(arvados.util.keyset_list_all(self.api.groups().contents,
- order_key="uuid",
- num_retries=self.num_retries,
- uuid=self.project_uuid,
- filters=[["uuid", "is_a", "arvados#group"],
- ["groups.group_class", "in", ["project","filter"]]]))
- contents.extend(filter(lambda i: i["current_version_uuid"] == i["uuid"],
- arvados.util.keyset_list_all(self.api.groups().contents,
- order_key="uuid",
- num_retries=self.num_retries,
- uuid=self.project_uuid,
- filters=[["uuid", "is_a", "arvados#collection"]])))
-
+ contents = list(arvados.util.keyset_list_all(
+ self.api.groups().contents,
+ order_key='uuid',
+ num_retries=self.num_retries,
+ uuid=self.project_uuid,
+ filters=[
+ ['uuid', 'is_a', 'arvados#group'],
+ ['groups.group_class', 'in', ['project', 'filter']],
+ *self._filters_for('groups', qualified=True),
+ ],
+ ))
+ contents.extend(obj for obj in arvados.util.keyset_list_all(
+ self.api.groups().contents,
+ order_key='uuid',
+ num_retries=self.num_retries,
+ uuid=self.project_uuid,
+ filters=[
+ ['uuid', 'is_a', 'arvados#collection'],
+ *self._filters_for('collections', qualified=True),
+ ],
+ ) if obj['current_version_uuid'] == obj['uuid'])
# end with llfuse.lock_released, re-acquire lock
@@ -1032,14 +1121,24 @@ class ProjectDirectory(Directory):
namefilter = ["name", "=", k]
else:
namefilter = ["name", "in", [k, k2]]
- contents = self.api.groups().list(filters=[["owner_uuid", "=", self.project_uuid],
- ["group_class", "in", ["project","filter"]],
- namefilter],
- limit=2).execute(num_retries=self.num_retries)["items"]
+ contents = self.api.groups().list(
+ filters=[
+ ["owner_uuid", "=", self.project_uuid],
+ ["group_class", "in", ["project","filter"]],
+ namefilter,
+ *self._filters_for('groups', qualified=False),
+ ],
+ limit=2,
+ ).execute(num_retries=self.num_retries)["items"]
if not contents:
- contents = self.api.collections().list(filters=[["owner_uuid", "=", self.project_uuid],
- namefilter],
- limit=2).execute(num_retries=self.num_retries)["items"]
+ contents = self.api.collections().list(
+ filters=[
+ ["owner_uuid", "=", self.project_uuid],
+ namefilter,
+ *self._filters_for('collections', qualified=False),
+ ],
+ limit=2,
+ ).execute(num_retries=self.num_retries)["items"]
if contents:
if len(contents) > 1 and contents[1]['name'] == k:
# If "foo/bar" and "foo[SUBST]bar" both exist, use
@@ -1193,9 +1292,9 @@ class ProjectDirectory(Directory):
class SharedDirectory(Directory):
"""A special directory that represents users or groups who have shared projects with me."""
- def __init__(self, parent_inode, inodes, api, num_retries, enable_write, exclude,
- poll=False, poll_time=60, storage_classes=None):
- super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write)
+ def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,
+ exclude, poll=False, poll_time=60, storage_classes=None):
+ super(SharedDirectory, self).__init__(parent_inode, inodes, api.config, enable_write, filters)
self.api = api
self.num_retries = num_retries
self.current_user = api.users().current().execute(num_retries=num_retries)
@@ -1221,11 +1320,17 @@ class SharedDirectory(Directory):
if 'httpMethod' in methods.get('shared', {}):
page = []
while True:
- resp = self.api.groups().shared(filters=[['group_class', 'in', ['project','filter']]]+page,
- order="uuid",
- limit=10000,
- count="none",
- include="owner_uuid").execute()
+ resp = self.api.groups().shared(
+ filters=[
+ ['group_class', 'in', ['project','filter']],
+ *page,
+ *self._filters_for('groups', qualified=False),
+ ],
+ order="uuid",
+ limit=10000,
+ count="none",
+ include="owner_uuid",
+ ).execute()
if not resp["items"]:
break
page = [["uuid", ">", resp["items"][len(resp["items"])-1]["uuid"]]]
@@ -1240,8 +1345,12 @@ class SharedDirectory(Directory):
self.api.groups().list,
order_key="uuid",
num_retries=self.num_retries,
- filters=[['group_class','in',['project','filter']]],
- select=["uuid", "owner_uuid"]))
+ filters=[
+ ['group_class', 'in', ['project','filter']],
+ *self._filters_for('groups', qualified=False),
+ ],
+ select=["uuid", "owner_uuid"],
+ ))
for ob in all_projects:
objects[ob['uuid']] = ob
@@ -1255,13 +1364,20 @@ class SharedDirectory(Directory):
self.api.users().list,
order_key="uuid",
num_retries=self.num_retries,
- filters=[['uuid','in', list(root_owners)]])
+ filters=[
+ ['uuid', 'in', list(root_owners)],
+ *self._filters_for('users', qualified=False),
+ ],
+ )
lgroups = arvados.util.keyset_list_all(
self.api.groups().list,
order_key="uuid",
num_retries=self.num_retries,
- filters=[['uuid','in', list(root_owners)+roots]])
-
+ filters=[
+ ['uuid', 'in', list(root_owners)+roots],
+ *self._filters_for('groups', qualified=False),
+ ],
+ )
for l in lusers:
objects[l["uuid"]] = l
for l in lgroups:
@@ -1283,11 +1399,23 @@ class SharedDirectory(Directory):
# end with llfuse.lock_released, re-acquire lock
- self.merge(contents.items(),
- lambda i: i[0],
- lambda a, i: a.uuid() == i[1]['uuid'],
- lambda i: ProjectDirectory(self.inode, self.inodes, self.api, self.num_retries, self._enable_write,
- i[1], poll=self._poll, poll_time=self._poll_time, storage_classes=self.storage_classes))
+ self.merge(
+ contents.items(),
+ lambda i: i[0],
+ lambda a, i: a.uuid() == i[1]['uuid'],
+ lambda i: ProjectDirectory(
+ self.inode,
+ self.inodes,
+ self.api,
+ self.num_retries,
+ self._enable_write,
+ self._filters,
+ i[1],
+ poll=self._poll,
+ poll_time=self._poll_time,
+ storage_classes=self.storage_classes,
+ ),
+ )
except Exception:
_logger.exception("arv-mount shared dir error")
finally:
diff --git a/services/fuse/tests/mount_test_base.py b/services/fuse/tests/mount_test_base.py
index c316010f6c..8a3522e0cb 100644
--- a/services/fuse/tests/mount_test_base.py
+++ b/services/fuse/tests/mount_test_base.py
@@ -72,15 +72,22 @@ class MountTestBase(unittest.TestCase):
llfuse.close()
def make_mount(self, root_class, **root_kwargs):
- enable_write = True
- if 'enable_write' in root_kwargs:
- enable_write = root_kwargs.pop('enable_write')
+ enable_write = root_kwargs.pop('enable_write', True)
self.operations = fuse.Operations(
- os.getuid(), os.getgid(),
+ os.getuid(),
+ os.getgid(),
api_client=self.api,
- enable_write=enable_write)
+ enable_write=enable_write,
+ )
self.operations.inodes.add_entry(root_class(
- llfuse.ROOT_INODE, self.operations.inodes, self.api, 0, enable_write, **root_kwargs))
+ llfuse.ROOT_INODE,
+ self.operations.inodes,
+ self.api,
+ 0,
+ enable_write,
+ root_kwargs.pop('filters', None),
+ **root_kwargs,
+ ))
llfuse.init(self.operations, self.mounttmp, [])
self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())
self.llfuse_thread.daemon = True
diff --git a/services/fuse/tests/test_mount.py b/services/fuse/tests/test_mount.py
index f4e5138e2c..ef9c25bcf5 100644
--- a/services/fuse/tests/test_mount.py
+++ b/services/fuse/tests/test_mount.py
@@ -1126,7 +1126,10 @@ class MagicDirApiError(FuseMagicTest):
class SanitizeFilenameTest(MountTestBase):
def test_sanitize_filename(self):
- pdir = fuse.ProjectDirectory(1, {}, self.api, 0, False, project_object=self.api.users().current().execute())
+ pdir = fuse.ProjectDirectory(
+ 1, {}, self.api, 0, False, None,
+ project_object=self.api.users().current().execute(),
+ )
acceptable = [
"foo.txt",
".foo",
diff --git a/services/fuse/tests/test_mount_filters.py b/services/fuse/tests/test_mount_filters.py
new file mode 100644
index 0000000000..5f324537fb
--- /dev/null
+++ b/services/fuse/tests/test_mount_filters.py
@@ -0,0 +1,223 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+import collections
+import itertools
+import json
+import re
+import unittest
+
+from pathlib import Path
+
+from parameterized import parameterized
+
+from arvados_fuse import fusedir
+
+from .integration_test import IntegrationTest
+from .mount_test_base import MountTestBase
+from .run_test_server import fixture
+
+_COLLECTIONS = fixture('collections')
+_GROUPS = fixture('groups')
+_LINKS = fixture('links')
+_USERS = fixture('users')
+
+class DirectoryFiltersTestCase(MountTestBase):
+ DEFAULT_ROOT_KWARGS = {
+ 'enable_write': False,
+ 'filters': [
+ ['collections.name', 'like', 'zzzzz-4zz18-%'],
+ # This matches both "A Project" (which we use as the test root)
+ # and "A Subproject" (which we assert is found under it).
+ ['groups.name', 'like', 'A %roject'],
+ ],
+ }
+ EXPECTED_PATHS = frozenset([
+ _COLLECTIONS['foo_collection_in_aproject']['name'],
+ _GROUPS['asubproject']['name'],
+ ])
+ CHECKED_PATHS = EXPECTED_PATHS.union([
+ _COLLECTIONS['collection_to_move_around_in_aproject']['name'],
+ _GROUPS['subproject_in_active_user_home_project_to_test_unique_key_violation']['name'],
+ ])
+
+ @parameterized.expand([
+ (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),
+ (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),
+ (fusedir.SharedDirectory, {'exclude': None}, Path(
+ '{first_name} {last_name}'.format_map(_USERS['active']),
+ _GROUPS['aproject']['name'],
+ )),
+ ])
+ def test_filtered_path_exists(self, root_class, root_kwargs, subdir):
+ root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)
+ self.make_mount(root_class, **root_kwargs)
+ dir_path = Path(self.mounttmp, subdir)
+ actual = frozenset(
+ basename
+ for basename in self.CHECKED_PATHS
+ if (dir_path / basename).exists()
+ )
+ self.assertEqual(
+ actual,
+ self.EXPECTED_PATHS,
+ "mount existence checks did not match expected results",
+ )
+
+ @parameterized.expand([
+ (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),
+ (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),
+ (fusedir.SharedDirectory, {'exclude': None}, Path(
+ '{first_name} {last_name}'.format_map(_USERS['active']),
+ _GROUPS['aproject']['name'],
+ )),
+ ])
+ def test_filtered_path_listing(self, root_class, root_kwargs, subdir):
+ root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)
+ self.make_mount(root_class, **root_kwargs)
+ actual = frozenset(path.name for path in Path(self.mounttmp, subdir).iterdir())
+ self.assertEqual(
+ actual & self.EXPECTED_PATHS,
+ self.EXPECTED_PATHS,
+ "mount listing did not include minimum matches",
+ )
+ extra = frozenset(
+ name
+ for name in actual
+ if not (name.startswith('zzzzz-4zz18-') or name.endswith('roject'))
+ )
+ self.assertFalse(
+ extra,
+ "mount listing included results outside filters",
+ )
+
+
+class TagFiltersTestCase(MountTestBase):
+ COLL_UUID = _COLLECTIONS['foo_collection_in_aproject']['uuid']
+ TAG_NAME = _LINKS['foo_collection_tag']['name']
+
+ @parameterized.expand([
+ '=',
+ '!=',
+ ])
+ def test_tag_directory_filters(self, op):
+ self.make_mount(
+ fusedir.TagDirectory,
+ enable_write=False,
+ filters=[
+ ['links.head_uuid', op, self.COLL_UUID],
+ ],
+ tag=self.TAG_NAME,
+ )
+ checked_path = Path(self.mounttmp, self.COLL_UUID)
+ self.assertEqual(checked_path.exists(), op == '=')
+
+ @parameterized.expand(itertools.product(
+ ['in', 'not in'],
+ ['=', '!='],
+ ))
+ def test_tags_directory_filters(self, coll_op, link_op):
+ self.make_mount(
+ fusedir.TagsDirectory,
+ enable_write=False,
+ filters=[
+ ['links.head_uuid', coll_op, [self.COLL_UUID]],
+ ['links.name', link_op, self.TAG_NAME],
+ ],
+ )
+ if link_op == '!=':
+ filtered_path = Path(self.mounttmp, self.TAG_NAME)
+ elif coll_op == 'not in':
+            # As of 2024-02-09, the foo tag applies to only a single collection.
+            # If you filter that collection out via head_uuid, the tag disappears
+            # completely from the TagsDirectory. Hence we set that tag directory
+            # as filtered_path. If any of this changes in the future,
+            # it would be fine to append self.COLL_UUID to filtered_path here.
+ filtered_path = Path(self.mounttmp, self.TAG_NAME)
+ else:
+ filtered_path = Path(self.mounttmp, self.TAG_NAME, self.COLL_UUID, 'foo', 'nonexistent')
+ expect_path = filtered_path.parent
+ self.assertTrue(
+ expect_path.exists(),
+ f"path not found but should exist: {expect_path}",
+ )
+ self.assertFalse(
+ filtered_path.exists(),
+ f"path was found but should be filtered out: {filtered_path}",
+ )
+
+
+class FiltersIntegrationTest(IntegrationTest):
+ COLLECTIONS_BY_PROP = {
+ coll['properties']['MainFile']: coll
+ for coll in _COLLECTIONS.values()
+ if coll['owner_uuid'] == _GROUPS['fuse_filters_test_project']['uuid']
+ }
+ PROP_VALUES = list(COLLECTIONS_BY_PROP)
+
+ for test_n, query in enumerate(['foo', 'ba?']):
+ @IntegrationTest.mount([
+ '--filters', json.dumps([
+ ['collections.properties.MainFile', 'like', query],
+ ]),
+ '--mount-by-pdh', 'by_pdh',
+ '--mount-by-id', 'by_id',
+ '--mount-home', 'home',
+ ])
+ def _test_func(self, query=query):
+ pdh_path = Path(self.mnt, 'by_pdh')
+ id_path = Path(self.mnt, 'by_id')
+ home_path = Path(self.mnt, 'home')
+ query_re = re.compile(query.replace('?', '.'))
+ for prop_val, coll in self.COLLECTIONS_BY_PROP.items():
+ should_exist = query_re.fullmatch(prop_val) is not None
+ for path in [
+ pdh_path / coll['portable_data_hash'],
+ id_path / coll['portable_data_hash'],
+ id_path / coll['uuid'],
+ home_path / coll['name'],
+ ]:
+ self.assertEqual(
+ path.exists(),
+ should_exist,
+ f"{path} from MainFile={prop_val} exists!={should_exist}",
+ )
+ exec(f"test_collection_properties_filters_{test_n} = _test_func")
+
+ for test_n, mount_opts in enumerate([
+ ['--home'],
+ ['--project', _GROUPS['aproject']['uuid']],
+ ]):
+ @IntegrationTest.mount([
+ '--filters', json.dumps([
+ ['collections.name', 'like', 'zzzzz-4zz18-%'],
+ ['groups.name', 'like', 'A %roject'],
+ ]),
+ *mount_opts,
+ ])
+ def _test_func(self, mount_opts=mount_opts):
+ root_path = Path(self.mnt)
+ root_depth = len(root_path.parts)
+ max_depth = 0
+ name_re = re.compile(r'(zzzzz-4zz18-.*|A .*roject)')
+ dir_queue = [root_path]
+ while dir_queue:
+ root_path = dir_queue.pop()
+ max_depth = max(max_depth, len(root_path.parts))
+ for child in root_path.iterdir():
+ if not child.is_dir():
+ continue
+ match = name_re.fullmatch(child.name)
+ self.assertIsNotNone(
+ match,
+ "found directory with name that should've been filtered",
+ )
+ if not match.group(1).startswith('zzzzz-4zz18-'):
+ dir_queue.append(child)
+ self.assertGreaterEqual(
+ max_depth,
+ root_depth + (2 if mount_opts[0] == '--home' else 1),
+ "test descended fewer subdirectories than expected",
+ )
+ exec(f"test_multiple_name_filters_{test_n} = _test_func")
commit 89d7d0839427c46f82c0df351456df811b4a9e27
Author: Brett Smith <brett.smith at curii.com>
Date: Mon Feb 12 10:59:46 2024 -0500
Merge branch 'github-pr-223'
Closes #21432.
<https://github.com/arvados/arvados/pull/223>
Arvados-DCO-1.1-Signed-off-by: Brett Smith <brett.smith at curii.com>
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
index 2c3168649f..ad37dad2bb 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java
@@ -10,9 +10,13 @@ package org.arvados.client.api.client;
import okhttp3.HttpUrl;
import okhttp3.Request;
import okhttp3.RequestBody;
+import okhttp3.Response;
+import okhttp3.ResponseBody;
+
import org.arvados.client.config.ConfigProvider;
import java.io.File;
+import java.io.IOException;
import java.io.InputStream;
public class KeepWebApiClient extends BaseApiClient {
@@ -30,6 +34,27 @@ public class KeepWebApiClient extends BaseApiClient {
return newFileCall(request);
}
+ public InputStream get(String collectionUuid, String filePathName, long start, Long end) throws IOException {
+ Request.Builder builder = this.getRequestBuilder();
+ String rangeValue = "bytes=" + start + "-";
+ if (end != null) {
+ rangeValue += end;
+ }
+ builder.addHeader("Range", rangeValue);
+ Request request = builder.url(this.getUrlBuilder(collectionUuid, filePathName).build()).get().build();
+ Response response = client.newCall(request).execute();
+ if (!response.isSuccessful()) {
+ response.close();
+ throw new IOException("Failed to download file: " + response);
+ }
+ ResponseBody body = response.body();
+ if (body == null) {
+ response.close();
+ throw new IOException("Response body is null for request: " + request);
+ }
+ return body.byteStream();
+ }
+
public String delete(String collectionUuid, String filePathName) {
Request request = getRequestBuilder()
.url(getUrlBuilder(collectionUuid, filePathName).build())
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
index c1e8849e39..5bfcabc109 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java
@@ -23,6 +23,8 @@ import org.slf4j.Logger;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
@@ -70,6 +72,37 @@ public class FileDownloader {
return downloadedFile;
}
+ public File downloadFileWithResume(String collectionUuid, String fileName, String pathToDownloadFolder, long start, Long end) throws IOException {
+ if (end != null && end < start) {
+ throw new IllegalArgumentException("End index must be greater than or equal to the start index");
+ }
+
+ File destinationFile = new File(pathToDownloadFolder, fileName);
+
+ if (!destinationFile.exists()) {
+ boolean isCreated = destinationFile.createNewFile();
+ if (!isCreated) {
+ throw new IOException("Failed to create new file: " + destinationFile.getAbsolutePath());
+ }
+ }
+
+ try (RandomAccessFile outputFile = new RandomAccessFile(destinationFile, "rw");
+ InputStream inputStream = keepWebApiClient.get(collectionUuid, fileName, start, end)) {
+ outputFile.seek(start);
+
+ long remaining = (end == null) ? Long.MAX_VALUE : end - start + 1;
+ byte[] buffer = new byte[4096];
+ int bytesRead;
+ while ((bytesRead = inputStream.read(buffer)) != -1 && remaining > 0) {
+ int bytesToWrite = (int) Math.min(bytesRead, remaining);
+ outputFile.write(buffer, 0, bytesToWrite);
+ remaining -= bytesToWrite;
+ }
+ }
+
+ return destinationFile;
+ }
+
public List<File> downloadFilesFromCollectionUsingKeepWeb(String collectionUuid, String pathToDownloadFolder) {
String collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder).getAbsolutePath();
List<FileToken> fileTokens = listFileInfoFromCollection(collectionUuid);
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
index 07b7b25339..9b6b4fa17f 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java
@@ -10,15 +10,23 @@ package org.arvados.client.api.client;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
import org.junit.Test;
+import java.io.ByteArrayOutputStream;
import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
import java.nio.file.Files;
+import okhttp3.mockwebserver.MockResponse;
+import okio.Buffer;
+
import static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertNotNull;
public class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {
- private KeepWebApiClient client = new KeepWebApiClient(CONFIG);
+ private final KeepWebApiClient client = new KeepWebApiClient(CONFIG);
@Test
public void uploadFile() throws Exception {
@@ -36,4 +44,38 @@ public class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {
assertThat(uploadResponse).isEqualTo("Created");
}
+ @Test
+ public void downloadPartialIsPerformedSuccessfully() throws Exception {
+ // given
+ String collectionUuid = "some-collection-uuid";
+ String filePathName = "sample-file-path";
+ long start = 1024;
+ Long end = null;
+
+ byte[] expectedData = "test data".getBytes();
+
+ try (Buffer buffer = new Buffer().write(expectedData)) {
+ server.enqueue(new MockResponse().setBody(buffer));
+
+ // when
+ InputStream inputStream = client.get(collectionUuid, filePathName, start, end);
+ byte[] actualData = inputStreamToByteArray(inputStream);
+
+ // then
+ assertNotNull(actualData);
+ assertArrayEquals(expectedData, actualData);
+ }
+ }
+
+ private byte[] inputStreamToByteArray(InputStream inputStream) throws IOException {
+ ByteArrayOutputStream buffer = new ByteArrayOutputStream();
+ int nRead;
+ byte[] data = new byte[1024];
+ while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
+ buffer.write(data, 0, nRead);
+ }
+ buffer.flush();
+ return buffer.toByteArray();
+ }
+
}
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java b/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
index 0fb1f0206c..741f80f7c9 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java
@@ -19,7 +19,6 @@ import org.arvados.client.test.utils.FileTestUtils;
import org.arvados.client.utils.FileMerge;
import org.apache.commons.io.FileUtils;
import org.junit.After;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -27,8 +26,11 @@ import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
+import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -36,6 +38,10 @@ import java.util.UUID;
import static org.arvados.client.test.utils.FileTestUtils.*;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
@@ -80,17 +86,17 @@ public class FileDownloaderTest {
List<File> downloadedFiles = fileDownloader.downloadFilesFromCollection(collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
//then
- Assert.assertEquals(3, downloadedFiles.size()); // 3 files downloaded
+ assertEquals(3, downloadedFiles.size()); // 3 files downloaded
File collectionDir = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionToDownload.getUuid());
- Assert.assertTrue(collectionDir.exists()); // collection directory created
+ assertTrue(collectionDir.exists()); // collection directory created
// 3 files correctly saved
assertThat(downloadedFiles).allMatch(File::exists);
for(int i = 0; i < downloadedFiles.size(); i ++) {
File downloaded = new File(collectionDir + Characters.SLASH + files.get(i).getName());
- Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloaded), FileUtils.readFileToByteArray(files.get(i)));
+ assertArrayEquals(FileUtils.readFileToByteArray(downloaded), FileUtils.readFileToByteArray(files.get(i)));
}
}
@@ -108,9 +114,32 @@ public class FileDownloaderTest {
File downloadedFile = fileDownloader.downloadSingleFileUsingKeepWeb(file.getName(), collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);
//then
- Assert.assertTrue(downloadedFile.exists());
- Assert.assertEquals(file.getName(), downloadedFile.getName());
- Assert.assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));
+ assertTrue(downloadedFile.exists());
+ assertEquals(file.getName(), downloadedFile.getName());
+ assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));
+ }
+
+ @Test
+ public void testDownloadFileWithResume() throws Exception {
+ //given
+ String collectionUuid = "some-collection-uuid";
+ String expectedDataString = "testData";
+ String fileName = "sample-file-name";
+ long start = 0;
+ Long end = null;
+
+ InputStream inputStream = new ByteArrayInputStream(expectedDataString.getBytes());
+
+ when(keepWebApiClient.get(collectionUuid, fileName, start, end)).thenReturn(inputStream);
+
+ //when
+ File downloadedFile = fileDownloader.downloadFileWithResume(collectionUuid, fileName, FILE_DOWNLOAD_TEST_DIR, start, end);
+
+ //then
+ assertNotNull(downloadedFile);
+ assertTrue(downloadedFile.exists());
+ String actualDataString = Files.readString(downloadedFile.toPath());
+ assertEquals("The content of the file does not match the expected data.", expectedDataString, actualDataString);
}
@After
commit aeafe22313edb1633e6f5ce14b883ea6f2962b34
Author: Brett Smith <brett.smith at curii.com>
Date: Fri Feb 9 14:51:46 2024 -0500
Merge branch 'github-pr-224'
<https://github.com/arvados/arvados/pull/224>
Closes #21457.
Arvados-DCO-1.1-Signed-off-by: Brett Smith <brett.smith at curii.com>
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
index ab03d34f19..4bd59a75d7 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java
@@ -27,7 +27,7 @@ import java.util.Map;
public abstract class BaseStandardApiClient<T extends Item, L extends ItemList> extends BaseApiClient {
- private static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
+ protected static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());
private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseStandardApiClient.class);
BaseStandardApiClient(ConfigProvider config) {
@@ -107,7 +107,7 @@ public abstract class BaseStandardApiClient<T extends Item, L extends ItemList>
return MAPPER.readValue(content, cls);
}
- private <TL> String mapToJson(TL type) {
+ protected <TL> String mapToJson(TL type) {
ObjectWriter writer = MAPPER.writer().withDefaultPrettyPrinter();
try {
return writer.writeValueAsString(type);
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
index 141f02deba..581253f53c 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java
@@ -9,12 +9,18 @@ package org.arvados.client.api.client;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.api.model.CollectionReplaceFiles;
import org.arvados.client.config.ConfigProvider;
import org.slf4j.Logger;
+import okhttp3.HttpUrl;
+import okhttp3.Request;
+import okhttp3.RequestBody;
+
public class CollectionsApiClient extends BaseStandardApiClient<Collection, CollectionList> {
private static final String RESOURCE = "collections";
+
private final Logger log = org.slf4j.LoggerFactory.getLogger(CollectionsApiClient.class);
public CollectionsApiClient(ConfigProvider config) {
@@ -28,6 +34,14 @@ public class CollectionsApiClient extends BaseStandardApiClient<Collection, Coll
return newCollection;
}
+ public Collection update(String collectionUUID, CollectionReplaceFiles replaceFilesRequest) {
+ String json = mapToJson(replaceFilesRequest);
+ RequestBody body = RequestBody.create(JSON, json);
+ HttpUrl url = getUrlBuilder().addPathSegment(collectionUUID).build();
+ Request request = getRequestBuilder().put(body).url(url).build();
+ return callForType(request);
+ }
+
@Override
String getResource() {
return RESOURCE;
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java b/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java
new file mode 100644
index 0000000000..2ef19cee79
--- /dev/null
+++ b/sdk/java-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) The Arvados Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0
+ *
+ */
+
+package org.arvados.client.api.model;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.HashMap;
+import java.util.Map;
+
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class CollectionReplaceFiles {
+
+ @JsonProperty("collection")
+ private CollectionOptions collectionOptions;
+
+ @JsonProperty("replace_files")
+ private Map<String, String> replaceFiles;
+
+ public CollectionReplaceFiles() {
+ this.collectionOptions = new CollectionOptions();
+ this.replaceFiles = new HashMap<>();
+ }
+
+ public void addFileReplacement(String targetPath, String sourcePath) {
+ this.replaceFiles.put(targetPath, sourcePath);
+ }
+
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ public static class CollectionOptions {
+ @JsonProperty("preserve_version")
+ private boolean preserveVersion;
+
+ public CollectionOptions() {
+ this.preserveVersion = true;
+ }
+
+ public boolean isPreserveVersion() {
+ return preserveVersion;
+ }
+
+ public void setPreserveVersion(boolean preserveVersion) {
+ this.preserveVersion = preserveVersion;
+ }
+ }
+
+ public CollectionOptions getCollectionOptions() {
+ return collectionOptions;
+ }
+
+ public void setCollectionOptions(CollectionOptions collectionOptions) {
+ this.collectionOptions = collectionOptions;
+ }
+
+ public Map<String, String> getReplaceFiles() {
+ return replaceFiles;
+ }
+
+ public void setReplaceFiles(Map<String, String> replaceFiles) {
+ this.replaceFiles = replaceFiles;
+ }
+}
\ No newline at end of file
diff --git a/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java b/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
index 571cb25909..8b65cebc59 100644
--- a/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
+++ b/sdk/java-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java
@@ -28,6 +28,7 @@ import java.io.File;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
public class ArvadosFacade {
@@ -201,6 +202,21 @@ public class ArvadosFacade {
return collectionsApiClient.create(collection);
}
+ /**
+ * Uploads multiple files to an existing collection.
+ *
+ * @param collectionUUID UUID of collection to which the files are to be copied
+ * @param files map of files to be copied to existing collection.
+ * The map consists of a pair in the form of a filename and a filename
+ * along with the Portable data hash
+ * @return collection object mapped from JSON that is returned from server after successful copied
+ */
+ public Collection updateWithReplaceFiles(String collectionUUID, Map<String, String> files) {
+ CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();
+ replaceFilesRequest.getReplaceFiles().putAll(files);
+ return collectionsApiClient.update(collectionUUID, replaceFilesRequest);
+ }
+
/**
* Returns current user information based on Api Token provided via configuration
*
diff --git a/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java b/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
index 8da3bfbf51..94a79041a0 100644
--- a/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
+++ b/sdk/java-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java
@@ -7,21 +7,39 @@
package org.arvados.client.api.client;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
import okhttp3.mockwebserver.RecordedRequest;
import org.arvados.client.api.model.Collection;
import org.arvados.client.api.model.CollectionList;
+import org.arvados.client.api.model.CollectionReplaceFiles;
import org.arvados.client.test.utils.RequestMethod;
import org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;
+import org.junit.Before;
import org.junit.Test;
import static org.arvados.client.test.utils.ApiClientTestUtils.*;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.assertEquals;
public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
private static final String RESOURCE = "collections";
-
- private CollectionsApiClient client = new CollectionsApiClient(CONFIG);
+ private static final String TEST_COLLECTION_NAME = "Super Collection";
+ private static final String TEST_COLLECTION_UUID = "test-collection-uuid";
+ private ObjectMapper objectMapper;
+ private CollectionsApiClient client;
+
+ @Before
+ public void setUp() {
+ objectMapper = new ObjectMapper();
+ objectMapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
+ client = new CollectionsApiClient(CONFIG);
+ }
@Test
public void listCollections() throws Exception {
@@ -66,7 +84,7 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
// given
server.enqueue(getResponse("collections-create-simple"));
- String name = "Super Collection";
+ String name = TEST_COLLECTION_NAME;
Collection collection = new Collection();
collection.setName(name);
@@ -90,7 +108,7 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
// given
server.enqueue(getResponse("collections-create-manifest"));
- String name = "Super Collection";
+ String name = TEST_COLLECTION_NAME;
String manifestText = ". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\n";
Collection collection = new Collection();
@@ -109,4 +127,45 @@ public class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {
assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
assertThat(actual.getManifestText()).isEqualTo(manifestText);
}
+
+ @Test
+ public void testUpdateWithReplaceFiles() throws IOException, InterruptedException {
+ // given
+ server.enqueue(getResponse("collections-create-manifest"));
+
+ Map<String, String> files = new HashMap<>();
+ files.put("targetPath1", "sourcePath1");
+ files.put("targetPath2", "sourcePath2");
+
+ CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();
+ replaceFilesRequest.setReplaceFiles(files);
+
+ // when
+ Collection actual = client.update(TEST_COLLECTION_UUID, replaceFilesRequest);
+
+ // then
+ RecordedRequest request = server.takeRequest();
+ assertAuthorizationHeader(request);
+ assertRequestPath(request, "collections/test-collection-uuid");
+ assertRequestMethod(request, RequestMethod.PUT);
+ assertThat(actual.getPortableDataHash()).isEqualTo("d41d8cd98f00b204e9800998ecf8427e+0");
+
+ String actualRequestBody = request.getBody().readUtf8();
+ Map<String, Object> actualRequestMap = objectMapper.readValue(actualRequestBody, Map.class);
+
+ Map<String, Object> expectedRequestMap = new HashMap<>();
+ Map<String, Object> collectionOptionsMap = new HashMap<>();
+ collectionOptionsMap.put("preserve_version", true);
+
+ Map<String, String> replaceFilesMap = new HashMap<>();
+ replaceFilesMap.put("targetPath1", "sourcePath1");
+ replaceFilesMap.put("targetPath2", "sourcePath2");
+
+ expectedRequestMap.put("collection", collectionOptionsMap);
+ expectedRequestMap.put("replace_files", replaceFilesMap);
+
+ String expectedJson = objectMapper.writeValueAsString(expectedRequestMap);
+ String actualJson = objectMapper.writeValueAsString(actualRequestMap);
+ assertEquals(expectedJson, actualJson);
+ }
}
commit 010c28c7cf3f7d63b6d89f039a72646c48bec4f3
Author: Peter Amstutz <peter.amstutz at curii.com>
Date: Fri Feb 2 16:44:13 2024 -0500
Merge branch '21304-user-update' refs #21304
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>
diff --git a/lib/controller/integration_test.go b/lib/controller/integration_test.go
index fc1f705175..4bf7a03447 100644
--- a/lib/controller/integration_test.go
+++ b/lib/controller/integration_test.go
@@ -971,8 +971,8 @@ func (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {
"hostname": "example",
},
})
+ c.Assert(err, check.IsNil)
c.Check(outVM.UUID[0:5], check.Equals, "z3333")
- c.Check(err, check.IsNil)
// Make sure z3333 user list is up to date
_, err = conn3.UserList(rootctx3, arvados.ListOptions{Limit: 1000})
diff --git a/services/api/app/models/user.rb b/services/api/app/models/user.rb
index 7def490618..d37fd573c7 100644
--- a/services/api/app/models/user.rb
+++ b/services/api/app/models/user.rb
@@ -656,13 +656,16 @@ SELECT target_uuid, perm_level
remote_should_be_active = should_activate && remote_user[:is_invited] != false && remote_user[:is_active] == true
+ # Make sure blank username is nil
+ remote_user[:username] = nil if remote_user[:username] == ""
+
begin
user = User.create_with(email: remote_user[:email],
username: remote_user[:username],
first_name: remote_user[:first_name],
last_name: remote_user[:last_name],
- is_active: remote_should_be_active
- ).find_or_create_by(uuid: remote_user[:uuid])
+ is_active: remote_should_be_active,
+ ).find_or_create_by(uuid: remote_user[:uuid])
rescue ActiveRecord::RecordNotUnique
retry
end
@@ -680,8 +683,9 @@ SELECT target_uuid, perm_level
loginCluster = Rails.configuration.Login.LoginCluster
if user.username.nil? || user.username == ""
- # Don't have a username yet, set one
- needupdate[:username] = user.set_initial_username(requested: remote_user[:username])
+ # Don't have a username yet, try to set one
+ initial_username = user.set_initial_username(requested: remote_user[:username])
+ needupdate[:username] = initial_username if !initial_username.nil?
elsif remote_user_prefix != loginCluster
# Upstream is not login cluster, don't try to change the
# username once set.
@@ -710,6 +714,14 @@ SELECT target_uuid, perm_level
end
raise # Not the issue we're handling above
end
+ elsif user.new_record?
+ begin
+ user.save!
+ rescue => e
+ Rails.logger.debug "Error saving user record: #{$!}"
+ Rails.logger.debug "Backtrace:\n\t#{e.backtrace.join("\n\t")}"
+ raise
+ end
end
if remote_should_be_unsetup
diff --git a/services/api/test/functional/arvados/v1/users_controller_test.rb b/services/api/test/functional/arvados/v1/users_controller_test.rb
index aeb7e6a88e..0ab46b1302 100644
--- a/services/api/test/functional/arvados/v1/users_controller_test.rb
+++ b/services/api/test/functional/arvados/v1/users_controller_test.rb
@@ -1101,6 +1101,37 @@ The Arvados team.
assert_equal(1, Log.where(object_uuid: unchanginguuid).count)
end
+ test 'batch update does not produce spurious log events' do
+ # test for bug #21304
+
+ existinguuid = 'remot-tpzed-foobarbazwazqux'
+ act_as_system_user do
+ User.create!(uuid: existinguuid,
+ first_name: 'root',
+ is_active: true,
+ )
+ end
+ assert_equal(1, Log.where(object_uuid: existinguuid).count)
+
+ Rails.configuration.Login.LoginCluster = 'remot'
+
+ authorize_with(:admin)
+ patch(:batch_update,
+ params: {
+ updates: {
+ existinguuid => {
+ 'first_name' => 'root',
+ 'email' => '',
+ 'username' => '',
+ 'is_active' => true,
+ 'is_invited' => true
+ },
+ }})
+ assert_response(:success)
+
+ assert_equal(1, Log.where(object_uuid: existinguuid).count)
+ end
+
NON_ADMIN_USER_DATA = ["uuid", "kind", "is_active", "is_admin", "is_invited", "email", "first_name",
"last_name", "username", "can_write", "can_manage"].sort
commit 1bbd9a87f928257d44cf1a4a7f576cd49ab3e062
Author: Peter Amstutz <peter.amstutz at curii.com>
Date: Fri Feb 2 16:00:02 2024 -0500
Merge branch '21216-multiplier-typo-fix' refs #21216
Arvados-DCO-1.1-Signed-off-by: Peter Amstutz <peter.amstutz at curii.com>
diff --git a/doc/user/cwl/cwl-extensions.html.textile.liquid b/doc/user/cwl/cwl-extensions.html.textile.liquid
index e05072ddf6..3c8366721d 100644
--- a/doc/user/cwl/cwl-extensions.html.textile.liquid
+++ b/doc/user/cwl/cwl-extensions.html.textile.liquid
@@ -73,7 +73,7 @@ hints:
usePreemptible: true
arv:OutOfMemoryRetry:
- memoryRetryMultipler: 2
+ memoryRetryMultiplier: 2
memoryErrorRegex: "custom memory error"
{% endcodeblock %}
@@ -195,7 +195,7 @@ table(table table-bordered table-condensed).
h2(#OutOfMemoryRetry). arv:OutOfMemoryRetry
-Specify that when a workflow step appears to have failed because it did not request enough RAM, it should be re-submitted with more RAM. Out of memory conditions are detected either by the container being unexpectedly killed (exit code 137) or by matching a pattern in the container's output (see @memoryErrorRegex@). Retrying will increase the base RAM request by the value of @memoryRetryMultipler@. For example, if the original RAM request was 10 GiB and the multiplier is 1.5, then it will re-submit with 15 GiB.
+Specify that when a workflow step appears to have failed because it did not request enough RAM, it should be re-submitted with more RAM. Out of memory conditions are detected either by the container being unexpectedly killed (exit code 137) or by matching a pattern in the container's output (see @memoryErrorRegex@). Retrying will increase the base RAM request by the value of @memoryRetryMultiplier@. For example, if the original RAM request was 10 GiB and the multiplier is 1.5, then it will re-submit with 15 GiB.
Containers are only re-submitted once. If it fails a second time after increasing RAM, then the worklow step will still fail.
@@ -203,7 +203,7 @@ Also note that expressions that use @$(runtime.ram)@ (such as dynamic command li
table(table table-bordered table-condensed).
|_. Field |_. Type |_. Description |
-|memoryRetryMultipler|float|Required, the retry will multiply the base memory request by this factor to get the retry memory request.|
+|memoryRetryMultiplier|float|Optional, default value is 2. The retry will multiply the base memory request by this factor to get the retry memory request.|
|memoryErrorRegex|string|Optional, a custom regex that, if found in the stdout, stderr or crunch-run logging of a program, will trigger a retry with greater RAM. If not provided, the default pattern matches "out of memory" (with or without spaces), "memory error" (with or without spaces), "bad_alloc" and "container using over 90% of memory".|
h2. arv:dockerCollectionPDH
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
index 91a05e1254..aeb41db568 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml
@@ -478,8 +478,13 @@ $graph:
and stderr produced by the tool to determine if a failed job
should be retried with more RAM. By default, searches for the
substrings 'bad_alloc' and 'OutOfMemory'.
- - name: memoryRetryMultipler
- type: float
+ - name: memoryRetryMultiplier
+ type: float?
doc: |
If the container failed on its first run, re-submit the
container with the RAM request multiplied by this factor.
+ - name: memoryRetryMultipler
+ type: float?
+ doc: |
+ Deprecated misspelling of "memoryRetryMultiplier". Kept only
+ for backwards compatability, don't use this.
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
index 458d5a37a7..0e51d50080 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml
@@ -421,8 +421,13 @@ $graph:
and stderr produced by the tool to determine if a failed job
should be retried with more RAM. By default, searches for the
substrings 'bad_alloc' and 'OutOfMemory'.
- - name: memoryRetryMultipler
- type: float
+ - name: memoryRetryMultiplier
+ type: float?
doc: |
If the container failed on its first run, re-submit the
container with the RAM request multiplied by this factor.
+ - name: memoryRetryMultipler
+ type: float?
+ doc: |
+ Deprecated misspelling of "memoryRetryMultiplier". Kept only
+ for backwards compatability, don't use this.
diff --git a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
index 389add4104..a753579c9a 100644
--- a/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
+++ b/sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml
@@ -424,11 +424,17 @@ $graph:
and stderr produced by the tool to determine if a failed job
should be retried with more RAM. By default, searches for the
substrings 'bad_alloc' and 'OutOfMemory'.
- - name: memoryRetryMultipler
- type: float
+ - name: memoryRetryMultiplier
+ type: float?
doc: |
If the container failed on its first run, re-submit the
container with the RAM request multiplied by this factor.
+ - name: memoryRetryMultipler
+ type: float?
+ doc: |
+ Deprecated misspelling of "memoryRetryMultiplier". Kept only
+ for backwards compatability, don't use this.
+
- name: SeparateRunner
type: record
diff --git a/sdk/cwl/arvados_cwl/arvcontainer.py b/sdk/cwl/arvados_cwl/arvcontainer.py
index 6e3e42975e..9c370f983e 100644
--- a/sdk/cwl/arvados_cwl/arvcontainer.py
+++ b/sdk/cwl/arvados_cwl/arvcontainer.py
@@ -370,8 +370,13 @@ class ArvadosContainer(JobBase):
ram_multiplier = [1]
oom_retry_req, _ = self.get_requirement("http://arvados.org/cwl#OutOfMemoryRetry")
- if oom_retry_req and oom_retry_req.get('memoryRetryMultipler'):
- ram_multiplier.append(oom_retry_req.get('memoryRetryMultipler'))
+ if oom_retry_req:
+ if oom_retry_req.get('memoryRetryMultiplier'):
+ ram_multiplier.append(oom_retry_req.get('memoryRetryMultiplier'))
+ elif oom_retry_req.get('memoryRetryMultipler'):
+ ram_multiplier.append(oom_retry_req.get('memoryRetryMultipler'))
+ else:
+ ram_multiplier.append(2)
if runtimeContext.runnerjob.startswith("arvwf:"):
wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index("#")]
diff --git a/sdk/cwl/tests/arvados-tests.yml b/sdk/cwl/tests/arvados-tests.yml
index e0bdd8a5a3..cb4a151f0e 100644
--- a/sdk/cwl/tests/arvados-tests.yml
+++ b/sdk/cwl/tests/arvados-tests.yml
@@ -485,6 +485,11 @@
tool: oom/19975-oom.cwl
doc: "Test feature 19975 - retry on exit 137"
+- job: oom/fakeoom.yml
+ output: {}
+ tool: oom/19975-oom-mispelled.cwl
+ doc: "Test feature 19975 - retry on exit 137, old misspelled version"
+
- job: oom/fakeoom2.yml
output: {}
tool: oom/19975-oom.cwl
diff --git a/sdk/cwl/tests/oom/19975-oom.cwl b/sdk/cwl/tests/oom/19975-oom-mispelled.cwl
similarity index 86%
copy from sdk/cwl/tests/oom/19975-oom.cwl
copy to sdk/cwl/tests/oom/19975-oom-mispelled.cwl
index ec80648716..bbd26b9c9a 100644
--- a/sdk/cwl/tests/oom/19975-oom.cwl
+++ b/sdk/cwl/tests/oom/19975-oom-mispelled.cwl
@@ -8,6 +8,7 @@ $namespaces:
arv: "http://arvados.org/cwl#"
hints:
arv:OutOfMemoryRetry:
+ # legacy misspelled name, should behave exactly the same
memoryRetryMultipler: 2
ResourceRequirement:
ramMin: 256
diff --git a/sdk/cwl/tests/oom/19975-oom.cwl b/sdk/cwl/tests/oom/19975-oom.cwl
index ec80648716..bf3e5cc389 100644
--- a/sdk/cwl/tests/oom/19975-oom.cwl
+++ b/sdk/cwl/tests/oom/19975-oom.cwl
@@ -8,7 +8,7 @@ $namespaces:
arv: "http://arvados.org/cwl#"
hints:
arv:OutOfMemoryRetry:
- memoryRetryMultipler: 2
+ memoryRetryMultiplier: 2
ResourceRequirement:
ramMin: 256
arv:APIRequirement: {}
diff --git a/sdk/cwl/tests/oom/19975-oom3.cwl b/sdk/cwl/tests/oom/19975-oom3.cwl
index af3271b847..bbca110b6f 100644
--- a/sdk/cwl/tests/oom/19975-oom3.cwl
+++ b/sdk/cwl/tests/oom/19975-oom3.cwl
@@ -8,7 +8,7 @@ $namespaces:
arv: "http://arvados.org/cwl#"
hints:
arv:OutOfMemoryRetry:
- memoryRetryMultipler: 2
+ memoryRetryMultiplier: 2
memoryErrorRegex: Whoops
ResourceRequirement:
ramMin: 256
-----------------------------------------------------------------------
hooks/post-receive
--
More information about the arvados-commits
mailing list