diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 75d7d82c5e11..cca0d3a92f18 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -51,6 +51,7 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.protocol.ListStatusLightOptions; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -1433,9 +1434,21 @@ List getNextShallowListOfKeys(String prevKey) } // 2. Get immediate children by listStatusLight method - List statuses = - proxy.listStatusLight(volumeName, name, delimiterKeyPrefix, false, - startKey, listCacheSize, false); + // When delimiterKeyPrefix is "" (root listing), pass getKeyPrefix() as listPrefix + // for STS auth so OM checks LIST on that prefix instead of "*". + final String listPrefix = (delimiterKeyPrefix.isEmpty() && !getKeyPrefix().isEmpty()) + ? getKeyPrefix() : null; + final List statuses = proxy.listStatusLight( + ListStatusLightOptions.builder() + .setVolumeName(volumeName) + .setBucketName(name) + .setKeyName(delimiterKeyPrefix) + .setRecursive(false) + .setStartKey(startKey) + .setNumEntries(listCacheSize) + .setAllowPartialPrefixes(false) + .setListPrefix(listPrefix) + .build()); if (addedKeyPrefix && !statuses.isEmpty()) { // previous round already include the startKey, so remove it @@ -1674,9 +1687,21 @@ List getNextShallowListOfKeys(String prevKey) } // 2. Get immediate children by listStatus method. 
- List statuses = - proxy.listStatusLight(volumeName, name, getDelimiterKeyPrefix(), - false, startKey, listCacheSize, false); + // When delimiterKeyPrefix is "" (root listing), pass getKeyPrefix() as listPrefix + // for STS auth so OM checks LIST on that prefix instead of "*". + String listPrefix = (getDelimiterKeyPrefix().isEmpty() && !getKeyPrefix().isEmpty()) + ? getKeyPrefix() : null; + List statuses = proxy.listStatusLight( + ListStatusLightOptions.builder() + .setVolumeName(volumeName) + .setBucketName(name) + .setKeyName(getDelimiterKeyPrefix()) + .setRecursive(false) + .setStartKey(startKey) + .setNumEntries(listCacheSize) + .setAllowPartialPrefixes(false) + .setListPrefix(listPrefix) + .build()); if (!statuses.isEmpty()) { // If findFirstStartKey is false, indicates that the keyPrefix is an diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 96e8b654474e..1877d6bbed8a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1003,6 +1003,16 @@ List listStatus(String volumeName, String bucketName, /** * Lightweight listStatus API. * + * @param options Encapsulates volume, bucket, key, recursive, startKey, + * numEntries, allowPartialPrefixes, and optional listPrefix. + * @return list of file status + */ + List listStatusLight(ListStatusLightOptions options) + throws IOException; + + /** + * Lightweight listStatus API (convenience overload without listPrefix). 
+ * * @param volumeName Volume name * @param bucketName Bucket name * @param keyName Absolute path of the entry to be listed @@ -1015,9 +1025,12 @@ List listStatus(String volumeName, String bucketName, * this is needed in context of ListKeys * @return list of file status */ - List listStatusLight(String volumeName, + default List listStatusLight(String volumeName, String bucketName, String keyName, boolean recursive, String startKey, - long numEntries, boolean allowPartialPrefixes) throws IOException; + long numEntries, boolean allowPartialPrefixes) throws IOException { + return listStatusLight(ListStatusLightOptions.of(volumeName, bucketName, + keyName, recursive, startKey, numEntries, allowPartialPrefixes)); + } /** * Add acl for Ozone object. Return true if acl is added successfully else diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ListStatusLightOptions.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ListStatusLightOptions.java new file mode 100644 index 000000000000..28fff17bbf59 --- /dev/null +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ListStatusLightOptions.java @@ -0,0 +1,182 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client.protocol; + +import java.util.Objects; + +/** + * Options for {@link ClientProtocol#listStatusLight(ListStatusLightOptions)}. + * Encapsulates all parameters to allow future extensibility without breaking + * the method signature. + */ +public final class ListStatusLightOptions { + + private final String volumeName; + private final String bucketName; + private final String keyName; + private final boolean recursive; + private final String startKey; + private final long numEntries; + private final boolean allowPartialPrefixes; + // When keyName is empty (root listing), this is the original S3/list + // prefix for STS auth. Enables LIST check on this prefix instead of "*". + private final String listPrefix; + + private ListStatusLightOptions(Builder b) { + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.recursive = b.recursive; + this.startKey = b.startKey; + this.numEntries = b.numEntries; + this.allowPartialPrefixes = b.allowPartialPrefixes; + this.listPrefix = b.listPrefix; + } + + public String getVolumeName() { + return volumeName; + } + + public String getBucketName() { + return bucketName; + } + + public String getKeyName() { + return keyName; + } + + public boolean isRecursive() { + return recursive; + } + + public String getStartKey() { + return startKey; + } + + public long getNumEntries() { + return numEntries; + } + + public boolean isAllowPartialPrefixes() { + return allowPartialPrefixes; + } + + public String getListPrefix() { + return listPrefix; + } + + public static Builder builder() { + return new Builder(); + } + + /** + * Convenience factory for the common case (no listPrefix). 
+ */ + public static ListStatusLightOptions of(String volumeName, String bucketName, + String keyName, boolean recursive, String startKey, long numEntries, + boolean allowPartialPrefixes) { + return builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setRecursive(recursive) + .setStartKey(startKey) + .setNumEntries(numEntries) + .setAllowPartialPrefixes(allowPartialPrefixes) + .build(); + } + + /** + * Builder for ListStatusLightOptions. + */ + public static final class Builder { + private String volumeName; + private String bucketName; + private String keyName; + private boolean recursive; + private String startKey; + private long numEntries; + private boolean allowPartialPrefixes; + private String listPrefix; + + public Builder setVolumeName(String volumeName) { + this.volumeName = volumeName; + return this; + } + + public Builder setBucketName(String bucketName) { + this.bucketName = bucketName; + return this; + } + + public Builder setKeyName(String keyName) { + this.keyName = keyName; + return this; + } + + public Builder setRecursive(boolean recursive) { + this.recursive = recursive; + return this; + } + + public Builder setStartKey(String startKey) { + this.startKey = startKey; + return this; + } + + public Builder setNumEntries(long numEntries) { + this.numEntries = numEntries; + return this; + } + + public Builder setAllowPartialPrefixes(boolean allowPartialPrefixes) { + this.allowPartialPrefixes = allowPartialPrefixes; + return this; + } + + public Builder setListPrefix(String listPrefix) { + this.listPrefix = listPrefix; + return this; + } + + public ListStatusLightOptions build() { + return new ListStatusLightOptions(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ListStatusLightOptions that = (ListStatusLightOptions) o; + return recursive == that.recursive && numEntries == 
that.numEntries && + allowPartialPrefixes == that.allowPartialPrefixes && Objects.equals(volumeName, that.volumeName) && + Objects.equals(bucketName, that.bucketName) && Objects.equals(keyName, that.keyName) && + Objects.equals(startKey, that.startKey) && Objects.equals(listPrefix, that.listPrefix); + } + + @Override + public int hashCode() { + return Objects.hash( + volumeName, bucketName, keyName, recursive, startKey, numEntries, allowPartialPrefixes, listPrefix); + } +} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 02fc9eed32f0..482bb9fd9234 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -126,6 +126,7 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.protocol.ListStatusLightOptions; import org.apache.hadoop.ozone.om.OmConfig; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.AssumeRoleResponseInfo; @@ -2303,16 +2304,21 @@ public List listStatus(String volumeName, String bucketName, } @Override - public List listStatusLight(String volumeName, - String bucketName, String keyName, boolean recursive, String startKey, - long numEntries, boolean allowPartialPrefixes) throws IOException { - OmKeyArgs keyArgs = prepareOmKeyArgs(volumeName, bucketName, keyName); + public List listStatusLight(ListStatusLightOptions options) + throws IOException { + OmKeyArgs keyArgs = prepareOmKeyArgs(options.getVolumeName(), + options.getBucketName(), options.getKeyName()); + if (options.getListPrefix() != null && !options.getListPrefix().isEmpty()) { + keyArgs = 
keyArgs.toBuilder().setListPrefix(options.getListPrefix()).build(); + } if (omVersion.compareTo(OzoneManagerVersion.LIGHTWEIGHT_LIST_STATUS) >= 0) { - return ozoneManagerClient.listStatusLight(keyArgs, recursive, startKey, - numEntries, allowPartialPrefixes); + return ozoneManagerClient.listStatusLight( + keyArgs, options.isRecursive(), options.getStartKey(), options.getNumEntries(), + options.isAllowPartialPrefixes()); } else { - return ozoneManagerClient.listStatus(keyArgs, recursive, startKey, - numEntries, allowPartialPrefixes) + return ozoneManagerClient.listStatus( + keyArgs, options.isRecursive(), options.getStartKey(), options.getNumEntries(), + options.isAllowPartialPrefixes()) .stream() .map(OzoneFileStatusLight::fromOzoneFileStatus) .collect(Collectors.toList()); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index dfe0329fbe67..b5cdcc525402 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -61,6 +61,9 @@ public final class OmKeyArgs extends WithMetadata implements Auditable { // This allows a key to be created an committed atomically if the original has not // been modified. private Long expectedDataGeneration = null; + // Original S3/list prefix when keyName is empty (root listing). Used for STS + // auth to check LIST on this prefix instead of "*". 
+ private final String listPrefix; private OmKeyArgs(Builder b) { super(b); @@ -82,6 +85,7 @@ private OmKeyArgs(Builder b) { this.ownerName = b.ownerName; this.tags = b.tags.build(); this.expectedDataGeneration = b.expectedDataGeneration; + this.listPrefix = b.listPrefix; } public boolean getIsMultipartKey() { @@ -164,6 +168,14 @@ public Long getExpectedDataGeneration() { return expectedDataGeneration; } + /** + * Original S3/list prefix when keyName is empty (root listing). + * Used for STS auth to check LIST on this prefix instead of "*". + */ + public String getListPrefix() { + return listPrefix; + } + @Override public Map toAuditMap() { Map auditMap = new LinkedHashMap<>(); @@ -234,6 +246,7 @@ public static class Builder extends WithMetadata.Builder { private boolean forceUpdateContainerCacheFromSCM; private final MapBuilder tags; private Long expectedDataGeneration = null; + private String listPrefix = null; public Builder() { this(AclListBuilder.empty()); @@ -265,6 +278,7 @@ public Builder(OmKeyArgs obj) { this.expectedDataGeneration = obj.expectedDataGeneration; this.tags = MapBuilder.of(obj.tags); this.acls = AclListBuilder.of(obj.acls); + this.listPrefix = obj.listPrefix; } public Builder setVolumeName(String volume) { @@ -398,6 +412,11 @@ public Builder setExpectedDataGeneration(long generation) { return this; } + public Builder setListPrefix(String prefix) { + this.listPrefix = prefix; + return this; + } + public OmKeyArgs build() { return new OmKeyArgs(this); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 10dc29c97e8c..9ca351bbb5a7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -2370,7 +2370,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, .build(); ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, - numEntries, allowPartialPrefixes); + numEntries, allowPartialPrefixes, null); OMRequest omRequest = createOMRequest(Type.ListStatus) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2399,7 +2399,7 @@ public List listStatusLight(OmKeyArgs args, .build(); ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, - numEntries, allowPartialPrefixes); + numEntries, allowPartialPrefixes, args.getListPrefix()); OMRequest omRequest = createOMRequest(Type.ListStatusLight) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2417,7 +2417,7 @@ public List listStatusLight(OmKeyArgs args, } private ListStatusRequest.Builder createListStatusRequestBuilder(KeyArgs keyArgs, boolean recursive, String startKey, - long numEntries, boolean allowPartialPrefixes) { + long numEntries, boolean allowPartialPrefixes, String listPrefix) { ListStatusRequest.Builder listStatusRequestBuilder = ListStatusRequest.newBuilder() .setKeyArgs(keyArgs) @@ -2433,6 +2433,9 @@ private ListStatusRequest.Builder createListStatusRequestBuilder(KeyArgs keyArgs if (allowPartialPrefixes) { listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); } + if (listPrefix != null && !listPrefix.isEmpty()) { + listStatusRequestBuilder.setListPrefix(listPrefix); + } return listStatusRequestBuilder; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/iam/IamSessionPolicyResolver.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/iam/IamSessionPolicyResolver.java index b8b032f1b358..808b4b131691 100644 --- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/iam/IamSessionPolicyResolver.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/iam/IamSessionPolicyResolver.java @@ -97,6 +97,8 @@ public final class IamSessionPolicyResolver { private static final String[] S3_ACTION_PREFIXES = {"s3:Get", "s3:Put", "s3:List", "s3:Delete", "s3:Create"}; private static final String ERROR_PREFIX = "IAM session policy: "; + private static final String STRING_EQUALS = "StringEquals"; + private static final String STRING_LIKE = "StringLike"; @VisibleForTesting static final Map> S3_ACTION_MAP_CI = buildCaseInsensitiveS3ActionMap(); @@ -143,7 +145,7 @@ public static Set resolve(String policyJson, Strin final Set resources = readStringOrArray(stmt.get("Resource")); // Parse prefixes from conditions, if any - final Set prefixes = parsePrefixesFromConditions(stmt); + final Condition condition = parsePrefixesFromConditions(stmt); // Map actions to S3Action enum if possible final Set mappedS3Actions = mapPolicyActionsToS3Actions(actions); @@ -152,11 +154,20 @@ public static Set resolve(String policyJson, Strin continue; } + // s3:prefix is only applicable to the ListBucket action because we don't support ListBucketVersions + // (see https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html and search for + // s3:prefix). If a statement carries a Condition, non-ListBucket actions (e.g. GetObject, PutObject, 
+ final Set filteredS3Actions = filterActionsWhenConditionPresent(mappedS3Actions, condition); + if (filteredS3Actions.isEmpty()) { + continue; + } + // Categorize resources according to bucket resource, object resource, etc final Set resourceSpecs = validateAndCategorizeResources(authorizerType, resources); // For each action, map to Ozone objects (paths) and acls based on resource specs and prefixes - createPathsAndPermissions(volumeName, authorizerType, mappedS3Actions, resourceSpecs, prefixes, objToAclsMap); + createPathsAndPermissions(volumeName, authorizerType, filteredS3Actions, resourceSpecs, condition, objToAclsMap); } // Group accumulated objects by their ACL sets to create final result @@ -263,10 +274,10 @@ private static Set readStringOrArray(JsonNode node) { * that if there is a Condition, there is only one and that the Condition * operator and key name are supported. *

- * Only the StringEquals operator and s3:prefix key name are supported. + * Only the StringEquals and StringLike operators and s3:prefix key name are supported. */ - private static Set parsePrefixesFromConditions(JsonNode stmt) throws OMException { - Set prefixes = Collections.emptySet(); + private static Condition parsePrefixesFromConditions(JsonNode stmt) throws OMException { + Condition condition = null; final JsonNode cond = stmt.get("Condition"); if (cond != null && !cond.isMissingNode() && !cond.isNull()) { if (cond.size() != 1) { @@ -275,12 +286,12 @@ private static Set parsePrefixesFromConditions(JsonNode stmt) throws OME if (!cond.isObject()) { throw new OMException( - ERROR_PREFIX + "Invalid Condition (must have operator StringEquals or StringLike " + - "and key name s3:prefix) - " + cond, MALFORMED_POLICY_DOCUMENT); + ERROR_PREFIX + "Invalid Condition (must have operator " + STRING_EQUALS + " or " + STRING_LIKE + + " and key name s3:prefix) - " + cond, MALFORMED_POLICY_DOCUMENT); } final String operator = cond.fieldNames().next(); - if (!"StringEquals".equals(operator) && !"StringLike".equals(operator)) { + if (!STRING_EQUALS.equals(operator) && !STRING_LIKE.equals(operator)) { throw new OMException(ERROR_PREFIX + "Unsupported Condition operator - " + operator, NOT_SUPPORTED_OPERATION); } @@ -300,10 +311,11 @@ private static Set parsePrefixesFromConditions(JsonNode stmt) throws OME throw new OMException(ERROR_PREFIX + "Unsupported Condition key name - " + keyName, NOT_SUPPORTED_OPERATION); } - prefixes = readStringOrArray(operatorValue.get(keyName)); + final Set prefixes = readStringOrArray(operatorValue.get(keyName)); + condition = new Condition(operator, prefixes); } - return prefixes; + return condition; } /** @@ -356,6 +368,23 @@ static Set mapPolicyActionsToS3Actions(Set actions) { return mappedActions; } + /** + * Filters out actions when a Condition is present if the action is not ListBucket. 
+ */ + private static Set filterActionsWhenConditionPresent(Set mappedS3Actions, Condition condition) { + if (condition == null) { + return mappedS3Actions; + } + + if (mappedS3Actions.contains(S3Action.LIST_BUCKET) || mappedS3Actions.contains(S3Action.ALL_S3)) { + final Set filteredActions = new HashSet<>(); + filteredActions.add(S3Action.LIST_BUCKET); + return filteredActions; + } + + return Collections.emptySet(); + } + /** * Validates that wildcard bucket patterns are not used with native authorizer. */ @@ -427,10 +456,11 @@ static Set validateAndCategorizeResources(AuthorizerType authorize */ @VisibleForTesting static void createPathsAndPermissions(String volumeName, AuthorizerType authorizerType, Set mappedS3Actions, - Set resourceSpecs, Set prefixes, Map> objToAclsMap) { + Set resourceSpecs, Condition condition, Map> objToAclsMap) { // Process each resource spec with the given actions for (ResourceSpec resourceSpec : resourceSpecs) { - processResourceSpecWithActions(volumeName, authorizerType, mappedS3Actions, resourceSpec, prefixes, objToAclsMap); + processResourceSpecWithActions( + volumeName, authorizerType, mappedS3Actions, resourceSpec, condition, objToAclsMap); } } @@ -461,7 +491,7 @@ static Set groupObjectsByAcls(Map mappedS3Actions, ResourceSpec resourceSpec, Set prefixes, + Set mappedS3Actions, ResourceSpec resourceSpec, Condition condition, Map> objToAclsMap) { // Process based on ResourceSpec type @@ -470,16 +500,16 @@ private static void processResourceSpecWithActions(String volumeName, Authorizer Preconditions.checkArgument( authorizerType != AuthorizerType.NATIVE, "ResourceSpec type ANY not supported for OzoneNativeAuthorizer"); - processResourceTypeAny(volumeName, mappedS3Actions, objToAclsMap); + processResourceTypeAny(volumeName, authorizerType, mappedS3Actions, condition, objToAclsMap); break; case BUCKET: - processBucketResource(volumeName, mappedS3Actions, resourceSpec, prefixes, authorizerType, objToAclsMap); + 
processBucketResource(volumeName, mappedS3Actions, resourceSpec, condition, authorizerType, objToAclsMap); break; case BUCKET_WILDCARD: Preconditions.checkArgument( authorizerType != AuthorizerType.NATIVE, "ResourceSpec type BUCKET_WILDCARD not supported for OzoneNativeAuthorizer"); - processBucketResource(volumeName, mappedS3Actions, resourceSpec, prefixes, authorizerType, objToAclsMap); + processBucketResource(volumeName, mappedS3Actions, resourceSpec, condition, authorizerType, objToAclsMap); break; case OBJECT_EXACT: processObjectExactResource(volumeName, mappedS3Actions, resourceSpec, objToAclsMap); @@ -505,12 +535,24 @@ private static void processResourceSpecWithActions(String volumeName, Authorizer * Handles ResourceType.ANY (*). * Example: "Resource": "*" */ - private static void processResourceTypeAny(String volumeName, Set mappedS3Actions, - Map> objToAclsMap) { + private static void processResourceTypeAny(String volumeName, AuthorizerType authorizerType, + Set mappedS3Actions, Condition condition, Map> objToAclsMap) { for (S3Action action : mappedS3Actions) { addAclsForObj(objToAclsMap, volumeObj(volumeName), action.volumePerms); addAclsForObj(objToAclsMap, bucketObj(volumeName, "*"), action.bucketPerms); - addAclsForObj(objToAclsMap, keyObj(volumeName, "*", "*"), action.objectPerms); + if (condition != null && condition.prefixes != null && !condition.prefixes.isEmpty() && + (action == S3Action.LIST_BUCKET || action == S3Action.ALL_S3)) { + for (String prefix : condition.prefixes) { + // If operator is StringEquals, ignore wildcard prefixes. 
+ if (STRING_EQUALS.equals(condition.operator) && hasWildcard(prefix)) { + continue; + } + createObjectResourcesFromConditionPrefix( + volumeName, authorizerType, ResourceSpec.any(), prefix, objToAclsMap, EnumSet.of(LIST)); + } + } else { + addAclsForObj(objToAclsMap, keyObj(volumeName, "*", "*"), action.objectPerms); + } + } } @@ -520,7 +562,7 @@ private static void processResourceTypeAny(String volumeName, Set mapp * "Resource": "arn:aws:s3:::*" */ private static void processBucketResource(String volumeName, Set mappedS3Actions, - ResourceSpec resourceSpec, Set prefixes, AuthorizerType authorizerType, + ResourceSpec resourceSpec, Condition condition, AuthorizerType authorizerType, Map> objToAclsMap) { for (S3Action action : mappedS3Actions) { // The s3:ListAllMyBuckets action can use either "*" or @@ -548,15 +590,19 @@ private static void processBucketResource(String volumeName, Set mappe if (action == S3Action.LIST_BUCKET || action == S3Action.ALL_S3) { // If condition prefixes are present, these would constrain the object permissions if the action // is s3:ListBucket or s3:* (which includes s3:ListBucket) - if (prefixes != null && !prefixes.isEmpty()) { - for (String prefix : prefixes) { + if (condition != null && condition.prefixes != null && !condition.prefixes.isEmpty()) { + for (String prefix : condition.prefixes) { + // If operator is StringEquals, we should ignore any prefix containing wildcards + if (STRING_EQUALS.equals(condition.operator) && hasWildcard(prefix)) { + continue; + } createObjectResourcesFromConditionPrefix( - volumeName, authorizerType, resourceSpec, prefix, objToAclsMap, EnumSet.of(READ)); + volumeName, authorizerType, resourceSpec, prefix, objToAclsMap, EnumSet.of(LIST)); } - } else { - // No condition prefixes, but we need READ access to all objects, so use "*" as the prefix + } else if (condition == null) { + // No Condition on the statement at all, but we need LIST access to all objects, so use "*" as the prefix 
createObjectResourcesFromConditionPrefix( - volumeName, authorizerType, resourceSpec, "*", objToAclsMap, EnumSet.of(READ)); + volumeName, authorizerType, resourceSpec, "*", objToAclsMap, EnumSet.of(LIST)); } } } @@ -590,19 +636,21 @@ private static void processObjectExactResource(String volumeName, Set private static void processObjectPrefixResource(String volumeName, AuthorizerType authorizerType, Set mappedS3Actions, ResourceSpec resourceSpec, Map> objToAclsMap) { for (S3Action action : mappedS3Actions) { - // Object actions apply to prefix/key resources + // Object actions apply to prefix/key resources - ensure to add the acls only for the appropriate action type if (action.kind == ActionKind.OBJECT) { addAclsForObj(objToAclsMap, volumeObj(volumeName), action.volumePerms); addAclsForObj(objToAclsMap, bucketObj(volumeName, resourceSpec.bucket), action.bucketPerms); + // Handle the resource prefix itself (e.g., my-bucket/*) + createObjectResourcesFromResourcePrefix( + volumeName, authorizerType, resourceSpec, objToAclsMap, action.objectPerms); } else if (action == S3Action.ALL_S3) { addAclsForObj(objToAclsMap, volumeObj(volumeName), EnumSet.of(READ)); // For s3:*, ALL should only apply at the object/prefix level; grant READ at bucket level for navigation addAclsForObj(objToAclsMap, bucketObj(volumeName, resourceSpec.bucket), EnumSet.of(READ)); + // Handle the resource prefix itself (e.g., my-bucket/*) + createObjectResourcesFromResourcePrefix( + volumeName, authorizerType, resourceSpec, objToAclsMap, action.objectPerms); } - - // Handle the resource prefix itself (e.g., my-bucket/*) - createObjectResourcesFromResourcePrefix( - volumeName, authorizerType, resourceSpec, objToAclsMap, action.objectPerms); } } @@ -708,6 +756,20 @@ enum S3ResourceType { OBJECT_EXACT } + /** + * Encapsulates the Condition operator and values. 
+ */ + @VisibleForTesting + public static final class Condition { + private final String operator; + private final Set prefixes; + + public Condition(String operator, Set prefixes) { + this.operator = operator; + this.prefixes = prefixes; + } + } + /** * Utility to help categorize IAM policy resources, whether for bucket, key, wildcards, etc. */ @@ -809,7 +871,7 @@ enum S3Action { GET_BUCKET_LOCATION("s3:GetBucketLocation", ActionKind.BUCKET, EnumSet.of(READ), EnumSet.of(READ), EnumSet.noneOf(ACLType.class)), // Used for HeadBucket, ListObjects and ListObjectsV2 apis - LIST_BUCKET("s3:ListBucket", ActionKind.BUCKET, EnumSet.of(READ), EnumSet.of(READ, LIST), EnumSet.of(READ)), + LIST_BUCKET("s3:ListBucket", ActionKind.BUCKET, EnumSet.of(READ), EnumSet.of(READ, LIST), EnumSet.of(LIST)), // Used for ListMultipartUploads API LIST_BUCKET_MULTIPART_UPLOADS("s3:ListBucketMultipartUploads", ActionKind.BUCKET, EnumSet.of(READ), EnumSet.of(READ, LIST), EnumSet.noneOf(ACLType.class)), @@ -902,4 +964,8 @@ private static IOzoneObj volumeObj(String volumeName) { .setVolumeName(volumeName) .build(); } + + private static boolean hasWildcard(String prefix) { + return ((prefix.contains("*") || prefix.contains("?"))); + } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/iam/TestIamSessionPolicyResolver.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/iam/TestIamSessionPolicyResolver.java index 41d2fc338f30..ec4a0ea514b7 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/iam/TestIamSessionPolicyResolver.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/iam/TestIamSessionPolicyResolver.java @@ -707,17 +707,15 @@ public void testCreatePathsAndPermissionsWithResourceAny() { new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.ANY, "*", null, null)); expectIllegalArgumentException( - () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, 
emptySet(), new LinkedHashMap<>()), + () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, new LinkedHashMap<>()), "ResourceSpec type ANY not supported for OzoneNativeAuthorizer"); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); - final Set readAndListObjects = objSet(volume(), bucket("*")); // volume, bucket level have READ, LIST - final Set readObject = objSet(key("*", "*")); // key level has READ - assertThat(resultRanger).containsExactlyInAnyOrder( - new OzoneGrant(readAndListObjects, acls(READ, LIST)), - new OzoneGrant(readObject, acls(READ))); + // volume, bucket level, key have READ, LIST + final Set readAndListObjects = objSet(volume(), bucket("*"), key("*", "*")); + assertThat(resultRanger).containsExactlyInAnyOrder(new OzoneGrant(readAndListObjects, acls(READ, LIST))); } @Test @@ -728,18 +726,22 @@ public void testCreatePathsAndPermissionsWithBucketResourceThatIsListBucket() { final Set readAndListObject = objSet(bucket("bucket1")); final Map> objToAclsMapNative = new LinkedHashMap<>(); - final Set nativeReadObjects = objSet(volume(), prefix("bucket1", "")); - createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), objToAclsMapNative); + final Set nativeListObject = objSet(prefix("bucket1", "")); + final Set nativeReadObject = objSet(volume()); + createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactlyInAnyOrder( - new OzoneGrant(readAndListObject, acls(READ, LIST)), new OzoneGrant(nativeReadObjects, acls(READ))); + new OzoneGrant(readAndListObject, acls(READ, LIST)), new OzoneGrant(nativeListObject, 
acls(LIST)), + new OzoneGrant(nativeReadObject, acls(READ))); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - final Set rangerReadObjects = objSet(volume(), key("bucket1", "*")); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + final Set rangerListObject = objSet(key("bucket1", "*")); + final Set rangerReadObject = objSet(volume()); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactlyInAnyOrder( - new OzoneGrant(readAndListObject, acls(READ, LIST)), new OzoneGrant(rangerReadObjects, acls(READ))); + new OzoneGrant(readAndListObject, acls(READ, LIST)), new OzoneGrant(rangerListObject, acls(LIST)), + new OzoneGrant(rangerReadObject, acls(READ))); } @Test @@ -751,13 +753,13 @@ public void testCreatePathsAndPermissionsWithBucketResourceThatIsNotListBucket() final Set readObject = objSet(volume()); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactlyInAnyOrder( new OzoneGrant(createObject, acls(CREATE)), new OzoneGrant(readObject, acls(READ))); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactlyInAnyOrder( new OzoneGrant(createObject, acls(CREATE)), new OzoneGrant(readObject, acls(READ))); @@ -772,11 +774,11 @@ public void 
testCreatePathsAndPermissionsWithBucketWildcardResource() { final Set readVolume = objSet(volume()); expectIllegalArgumentException( - () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), new LinkedHashMap<>()), + () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, new LinkedHashMap<>()), "ResourceSpec type BUCKET_WILDCARD not supported for OzoneNativeAuthorizer"); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactlyInAnyOrder( new OzoneGrant(writeAclObject, acls(WRITE_ACL)), new OzoneGrant(readVolume, acls(READ))); @@ -794,19 +796,19 @@ public void testCreatePathsAndPermissionsWithBucketsWildcardResourceAll() { new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.BUCKET_WILDCARD, "*", null, null)); expectIllegalArgumentException( - () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), new LinkedHashMap<>()), + () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, new LinkedHashMap<>()), "ResourceSpec type BUCKET_WILDCARD not supported for OzoneNativeAuthorizer"); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); // Both the volume and the wildcard bucket should end up with READ + LIST permissions. 
- // We also need READ access on the keys + // We also need LIST access on the keys final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); final Set readAndListObjects = objSet(volume(), bucket("*")); - final Set readObjects = objSet(key("*", "*")); + final Set listObjects = objSet(key("*", "*")); assertThat(resultRanger).containsExactlyInAnyOrder( - new OzoneGrant(readAndListObjects, acls(READ, LIST)), new OzoneGrant(readObjects, acls(READ))); + new OzoneGrant(readAndListObjects, acls(READ, LIST)), new OzoneGrant(listObjects, acls(LIST))); } @Test @@ -817,12 +819,12 @@ public void testCreatePathsAndPermissionsWithObjectExactResource() { final Set readObjects = objSet(key("bucket1", "key.txt"), bucket("bucket1"), volume()); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactly(new OzoneGrant(readObjects, acls(READ))); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactly(new OzoneGrant(readObjects, acls(READ))); } @@ -835,12 +837,12 @@ public void testCreatePathsAndPermissionsWithObjectPrefixResource() { new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.OBJECT_PREFIX, "bucket1", "prefix/", null)); final Set nativeReadObjects = objSet(prefix("bucket1", "prefix/"), bucket("bucket1"), volume()); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), objToAclsMapNative); + 
createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactly(new OzoneGrant(nativeReadObjects, acls(READ))); expectIllegalArgumentException( - () -> createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), new LinkedHashMap<>()), + () -> createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, new LinkedHashMap<>()), "ResourceSpec type OBJECT_PREFIX not supported for RangerOzoneAuthorizer"); } @@ -851,12 +853,12 @@ public void testCreatePathsAndPermissionsWithObjectPrefixWildcardResource() { new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.OBJECT_PREFIX_WILDCARD, "bucket1", "prefix/*", null)); expectIllegalArgumentException( - () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), new LinkedHashMap<>()), + () -> createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, new LinkedHashMap<>()), "ResourceSpec type OBJECT_PREFIX_WILDCARD not supported for OzoneNativeAuthorizer"); final Set rangerReadObjects = objSet(key("bucket1", "prefix/*"), bucket("bucket1"), volume()); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactly(new OzoneGrant(rangerReadObjects, acls(READ))); } @@ -865,12 +867,14 @@ public void testCreatePathsAndPermissionsWithObjectPrefixWildcardResource() { public void testCreatePathsAndPermissionsWithConditionPrefixesForObjectActionMustIgnoreConditionPrefixes() { final Set actions = Collections.singleton(S3Action.GET_OBJECT); final Set prefixes = strSet("folder1/", "folder2/"); + final IamSessionPolicyResolver.Condition condition = 
new IamSessionPolicyResolver.Condition( + "StringEquals", prefixes); final Set nativeResourceSpecs = Collections.singleton( new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.OBJECT_PREFIX, "bucket1", "", null)); final Map> objToAclsMapNative = new LinkedHashMap<>(); final Set nativeReadObjects = objSet(prefix("bucket1", ""), bucket("bucket1"), volume()); - createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, prefixes, objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, condition, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactly(new OzoneGrant(nativeReadObjects, acls(READ))); @@ -878,7 +882,7 @@ public void testCreatePathsAndPermissionsWithConditionPrefixesForObjectActionMus new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.OBJECT_PREFIX_WILDCARD, "bucket1", "*", null)); final Map> objToAclsMapRanger = new LinkedHashMap<>(); final Set rangerReadObjects = objSet(key("bucket1", "*"), bucket("bucket1"), volume()); - createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, prefixes, objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, condition, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactly(new OzoneGrant(rangerReadObjects, acls(READ))); } @@ -887,41 +891,49 @@ public void testCreatePathsAndPermissionsWithConditionPrefixesForObjectActionMus public void testCreatePathsAndPermissionsWithConditionPrefixesForBucketActionWhenActionIsListBucket() { final Set actions = Collections.singleton(S3Action.LIST_BUCKET); final Set prefixes = strSet("folder1/", "folder2/"); + final IamSessionPolicyResolver.Condition condition = new IamSessionPolicyResolver.Condition( + "StringEquals", prefixes); final Set nativeResourceSpecs = Collections.singleton( new 
IamSessionPolicyResolver.ResourceSpec(S3ResourceType.BUCKET, "bucket1", null, null)); - final Set nativeReadObjects = objSet( - prefix("bucket1", "folder1/"), prefix("bucket1", "folder2/"), volume()); + final Set nativeListObjects = objSet( + prefix("bucket1", "folder1/"), prefix("bucket1", "folder2/")); + final Set nativeReadObject = objSet(volume()); final Set nativeReadAndListObject = objSet(bucket("bucket1")); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, prefixes, objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, condition, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactlyInAnyOrder( - new OzoneGrant(nativeReadObjects, acls(READ)), new OzoneGrant(nativeReadAndListObject, acls(READ, LIST))); + new OzoneGrant(nativeListObjects, acls(LIST)), new OzoneGrant(nativeReadAndListObject, acls(READ, LIST)), + new OzoneGrant(nativeReadObject, acls(READ))); final Set rangerResourceSpecs = Collections.singleton( new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.BUCKET, "bucket1", null, null)); - final Set rangerReadObjects = objSet( - key("bucket1", "folder1/"), key("bucket1", "folder2/"), volume()); + final Set rangerListObjects = objSet( + key("bucket1", "folder1/"), key("bucket1", "folder2/")); + final Set rangerReadObject = objSet(volume()); final Set rangerReadAndListObject = objSet(bucket("bucket1")); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, prefixes, objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, condition, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactlyInAnyOrder( - new OzoneGrant(rangerReadObjects, acls(READ)), new 
OzoneGrant(rangerReadAndListObject, acls(READ, LIST))); + new OzoneGrant(rangerListObjects, acls(LIST)), new OzoneGrant(rangerReadAndListObject, acls(READ, LIST)), + new OzoneGrant(rangerReadObject, acls(READ))); } @Test public void testCreatePathsAndPermissionsWithConditionPrefixesForBucketActionWhenActionIsNotListBucket() { final Set actions = Collections.singleton(S3Action.GET_BUCKET_ACL); final Set prefixes = strSet("folder1/", "folder2/"); + final IamSessionPolicyResolver.Condition condition = new IamSessionPolicyResolver.Condition( + "StringEquals", prefixes); final Set readObject = objSet(volume()); final Set readAndReadAclObject = objSet(bucket("bucket1")); final Set nativeResourceSpecs = Collections.singleton( new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.BUCKET, "bucket1", null, null)); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, prefixes, objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, condition, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactlyInAnyOrder( new OzoneGrant(readObject, acls(READ)), new OzoneGrant(readAndReadAclObject, acls(READ, READ_ACL))); @@ -929,7 +941,7 @@ public void testCreatePathsAndPermissionsWithConditionPrefixesForBucketActionWhe final Set rangerResourceSpecs = Collections.singleton( new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.BUCKET, "bucket1", null, null)); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, prefixes, objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, condition, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactlyInAnyOrder( new OzoneGrant(readObject, acls(READ)), new 
OzoneGrant(readAndReadAclObject, acls(READ, READ_ACL))); @@ -942,14 +954,14 @@ public void testCreatePathsAndPermissionsWithNoMappedActions() { final Set nativeResourceSpecs = Collections.singleton( new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.OBJECT_PREFIX, "bucket1", null, null)); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, emptySet(), objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, nativeResourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).isEmpty(); final Set rangerResourceSpecs = Collections.singleton( new IamSessionPolicyResolver.ResourceSpec(S3ResourceType.OBJECT_PREFIX_WILDCARD, "bucket1", null, null)); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, rangerResourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).isEmpty(); } @@ -960,12 +972,12 @@ public void testCreatePathsAndPermissionsWithNoMappedResources() { final Set resourceSpecs = emptySet(); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).isEmpty(); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); 
assertThat(resultRanger).isEmpty(); } @@ -981,13 +993,13 @@ public void testCreatePathsAndPermissionsDeduplicatesAcrossSameResourceTypes() { final Set readObjects = objSet(bucket("bucket1"), volume()); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactlyInAnyOrder( new OzoneGrant(readAndDeleteObject, acls(READ, DELETE)), new OzoneGrant(readObjects, acls(READ))); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactlyInAnyOrder( new OzoneGrant(readAndDeleteObject, acls(READ, DELETE)), new OzoneGrant(readObjects, acls(READ))); @@ -1004,19 +1016,23 @@ public void testCreatePathsAndPermissionsWithAllS3ActionsOverridesAnyOtherAction .collect(Collectors.toSet()); final Set allObjects = objSet(key("bucket1", "key.txt"), bucket("bucket2")); - final Set nativeReadObjects = objSet(volume(), bucket("bucket1"), prefix("bucket2", "")); + final Set nativeReadObjects = objSet(volume(), bucket("bucket1")); + final Set nativeListObject = objSet(prefix("bucket2", "")); final Map> objToAclsMapNative = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, emptySet(), objToAclsMapNative); + createPathsAndPermissions(VOLUME, NATIVE, actions, resourceSpecs, null, objToAclsMapNative); final Set resultNative = groupObjectsByAcls(objToAclsMapNative); assertThat(resultNative).containsExactlyInAnyOrder( - new OzoneGrant(allObjects, acls(ALL)), new 
OzoneGrant(nativeReadObjects, acls(READ))); + new OzoneGrant(allObjects, acls(ALL)), new OzoneGrant(nativeReadObjects, acls(READ)), + new OzoneGrant(nativeListObject, acls(LIST))); - final Set rangerReadObjects = objSet(volume(), bucket("bucket1"), key("bucket2", "*")); + final Set rangerReadObjects = objSet(volume(), bucket("bucket1")); + final Set rangerListObject = objSet(key("bucket2", "*")); final Map> objToAclsMapRanger = new LinkedHashMap<>(); - createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, emptySet(), objToAclsMapRanger); + createPathsAndPermissions(VOLUME, RANGER, actions, resourceSpecs, null, objToAclsMapRanger); final Set resultRanger = groupObjectsByAcls(objToAclsMapRanger); assertThat(resultRanger).containsExactlyInAnyOrder( - new OzoneGrant(allObjects, acls(ALL)), new OzoneGrant(rangerReadObjects, acls(READ))); + new OzoneGrant(allObjects, acls(ALL)), new OzoneGrant(rangerReadObjects, acls(READ)), + new OzoneGrant(rangerListObject, acls(LIST))); } @Test @@ -1050,17 +1066,19 @@ public void testDeduplicatesAcrossMultipleStatementsWhenSameStatementsArePresent // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: bucket READ, LIST, READ_ACL, WRITE_ACL; volume and prefix "" READ + // Expected for native: bucket READ, LIST, READ_ACL, WRITE_ACL; volume READ and prefix "" LIST final Set bucketSet = objSet(bucket("my-bucket")); final Set bucketAcls = acls(READ, LIST, READ_ACL, WRITE_ACL); expectedResolvedNative.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedNative.add(new OzoneGrant(objSet(volume(), prefix("my-bucket", "")), acls(READ))); + expectedResolvedNative.add(new OzoneGrant(objSet(prefix("my-bucket", "")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for 
Ranger: bucket READ, LIST, READ_ACL, WRITE_ACL; volume and key "*" READ + // Expected for Ranger: bucket READ, LIST, READ_ACL, WRITE_ACL; volume READ and key "*" LIST expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedRanger.add(new OzoneGrant(objSet(volume(), key("my-bucket", "*")), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(key("my-bucket", "*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1095,19 +1113,21 @@ public void testDeduplicatesAcrossMultipleStatementsForSameActionsButDifferentRe // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: bucket READ, LIST, READ_ACL, WRITE_ACL; volume and prefix "" READ + // Expected for native: bucket READ, LIST, READ_ACL, WRITE_ACL; volume READ and prefix "" LIST final Set bucketSet = objSet(bucket("my-bucket"), bucket("my-bucket2")); final Set bucketAcls = acls(READ, LIST, READ_ACL, WRITE_ACL); expectedResolvedNative.add(new OzoneGrant(bucketSet, bucketAcls)); expectedResolvedNative.add(new OzoneGrant( - objSet(volume(), prefix("my-bucket2", ""), prefix("my-bucket", "")), acls(READ))); + objSet(prefix("my-bucket2", ""), prefix("my-bucket", "")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: bucket READ, LIST, READ_ACL, WRITE_ACL; volume and key "*" READ + // Expected for Ranger: bucket READ, LIST, READ_ACL, WRITE_ACL; volume READ and key "*" LIST expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); expectedResolvedRanger.add(new OzoneGrant( - objSet(volume(), key("my-bucket2", "*"), key("my-bucket", "*")), acls(READ))); + objSet(key("my-bucket2", "*"), 
key("my-bucket", "*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1141,17 +1161,19 @@ public void testDeduplicatesAcrossMultipleStatementsForDifferentActionsButSameRe // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: bucket READ, LIST, READ_ACL, WRITE_ACL, CREATE; volume, prefix "" READ + // Expected for native: bucket READ, LIST, READ_ACL, WRITE_ACL, CREATE; volume READ, prefix "" LIST final Set bucketSet = objSet(bucket("my-bucket")); final Set bucketAcls = acls(READ, LIST, READ_ACL, WRITE_ACL, CREATE); expectedResolvedNative.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedNative.add(new OzoneGrant(objSet(volume(), prefix("my-bucket", "")), acls(READ))); + expectedResolvedNative.add(new OzoneGrant(objSet(prefix("my-bucket", "")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: bucket READ, LIST, READ_ACL, WRITE_ACL, CREATE; volume, key "*" READ + // Expected for Ranger: bucket READ, LIST, READ_ACL, WRITE_ACL, CREATE; volume READ, key "*" LIST expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedRanger.add(new OzoneGrant(objSet(volume(), key("my-bucket", "*")), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(key("my-bucket", "*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1182,17 +1204,19 @@ public void testDeduplicatesAcrossMultipleStatementsWhenAllActionPresent() throw // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // 
Expected for native: bucket ALL (instead of individual actions); volume and prefix "" READ + // Expected for native: bucket ALL (instead of individual actions); volume READ and prefix "" LIST final Set bucketSet = objSet(bucket("my-bucket")); final Set bucketAcls = acls(ALL); expectedResolvedNative.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedNative.add(new OzoneGrant(objSet(volume(), prefix("my-bucket", "")), acls(READ))); + expectedResolvedNative.add(new OzoneGrant(objSet(prefix("my-bucket", "")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: bucket ALL (instead of individual actions); volume and key "*" READ + // Expected for Ranger: bucket ALL (instead of individual actions); volume READ and key "*" LIST expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedRanger.add(new OzoneGrant(objSet(volume(), key("my-bucket", "*")), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(key("my-bucket", "*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1252,6 +1276,30 @@ public void testAllActionsForKey() throws OMException { assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } + @Test + public void testAllActionsForKeyWithPrefixCondition() throws OMException { + final String json = "{\n" + + " \"Statement\": [{\n" + + " \"Effect\": \"Allow\",\n" + + " \"Action\": \"s3:*\",\n" + + " \"Resource\": \"arn:aws:s3:::my-bucket/*\",\n" + + " \"Condition\": {\n" + + " \"StringLike\": {\n" + + " \"s3:prefix\": [ \"team/folder\", \"team/folder/*\" ]\n" + + " }\n" + + " }\n" + + " }]\n" + + "}"; + + final Set resolvedFromNativeAuthorizer = resolve(json, VOLUME, NATIVE); + 
final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); + + // Ensure what we got is what we expected - only ListBucket supports s3:prefix and that is a bucket action, + // not object action + assertThat(resolvedFromNativeAuthorizer).isEmpty(); + assertThat(resolvedFromRangerAuthorizer).isEmpty(); + } + @Test public void testAllActionsForBucket() throws OMException { final String json = "{\n" + @@ -1267,20 +1315,62 @@ public void testAllActionsForBucket() throws OMException { // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: all Bucket ACLs for bucket; volume, prefix "" READ + // Expected for native: all Bucket ACLs for bucket; volume READ, prefix "" LIST final Set bucketSet = objSet(bucket("my-bucket")); final Set allBucketAcls = acls(ALL); - expectedResolvedNative.add(new OzoneGrant(objSet(volume(), prefix("my-bucket", "")), acls(READ))); + expectedResolvedNative.add(new OzoneGrant(objSet(prefix("my-bucket", "")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); expectedResolvedNative.add(new OzoneGrant(bucketSet, allBucketAcls)); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); - // Expected for Ranger: all Bucket ACLs for bucket; volume, key "*" READ + // Expected for Ranger: all Bucket ACLs for bucket; volume READ, key "*" LIST final Set expectedResolvedRanger = new LinkedHashSet<>(); - expectedResolvedRanger.add(new OzoneGrant(objSet(volume(), key("my-bucket", "*")), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(key("my-bucket", "*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); expectedResolvedRanger.add(new OzoneGrant(bucketSet, allBucketAcls)); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } + @Test + public void testAllActionsForBucketWithPrefixCondition() throws OMException { + final String json = "{\n" 
+ + " \"Statement\": [{\n" + + " \"Effect\": \"Allow\",\n" + + " \"Action\": \"s3:*\",\n" + + " \"Resource\": \"arn:aws:s3:::my-bucket\",\n" + + " \"Condition\": {\n" + + " \"StringLike\": {\n" + + " \"s3:prefix\": [ \"team/folder\", \"team/folder/*\" ]\n" + + " }\n" + + " }\n" + + " }]\n" + + "}"; + + final Set resolvedFromNativeAuthorizer = resolve(json, VOLUME, NATIVE); + final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); + + // Ensure what we got is what we expected + final Set expectedResolvedNative = new LinkedHashSet<>(); + // Expected for native: READ, LIST ACLs for bucket (only ListBucket supports s3:prefix); volume READ; + // prefix "team/folder", "team/folder/" LIST + final Set bucketSet = objSet(bucket("my-bucket")); + final Set bucketAcls = acls(READ, LIST); + expectedResolvedNative.add( + new OzoneGrant(objSet(prefix("my-bucket", "team/folder"), prefix("my-bucket", "team/folder/")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); + expectedResolvedNative.add(new OzoneGrant(bucketSet, bucketAcls)); + assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); + + // Expected for Ranger: READ, LIST ACLs for bucket (only ListBucket supports s3:prefix); volume READ, + // key "team/folder", "team/folder/*" LIST + final Set expectedResolvedRanger = new LinkedHashSet<>(); + expectedResolvedRanger.add( + new OzoneGrant(objSet(key("my-bucket", "team/folder"), key("my-bucket", "team/folder/*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); + assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); + } + @Test public void testMultipleResourcesInSeparateStatements() throws OMException { final String json = "{\n" + @@ -1457,11 +1547,12 @@ public void testListBucketWithWildcard() throws OMException { final Set resolvedFromRangerAuthorizer = resolve(json, 
VOLUME, RANGER); // Ensure what we got is what we expected final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: bucket READ and LIST on wildcard pattern; volume and key "*" READ + // Expected for Ranger: bucket READ and LIST on wildcard pattern; volume READ; key "*" LIST final Set bucketSet = objSet(bucket("proj-*")); final Set bucketAcls = acls(READ, LIST); expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedRanger.add(new OzoneGrant(objSet(volume(), key("proj-*", "*")), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(key("proj-*", "*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1485,19 +1576,23 @@ public void testListBucketOperationsWithNoPrefixes() throws OMException { // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: bucket READ and LIST; volume, prefix "" READ + // Expected for native: bucket READ and LIST; volume, prefix "" LIST final Set bucketSet = objSet(bucket("proj")); final Set bucketAcls = acls(READ, LIST); - final Set nativeReadObjects = objSet(volume(), prefix("proj", "")); + final Set nativeListObject = objSet(prefix("proj", "")); + final Set nativeReadObject = objSet(volume()); expectedResolvedNative.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedNative.add(new OzoneGrant(nativeReadObjects, acls(READ))); + expectedResolvedNative.add(new OzoneGrant(nativeListObject, acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(nativeReadObject, acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); - // Expected for Ranger: bucket READ and LIST; volume, key "*" READ - final Set rangerReadObjects = objSet(volume(), key("proj", "*")); + // Expected for Ranger: bucket READ and LIST; volume READ, key "*" LIST + final Set 
rangerListObject = objSet(key("proj", "*")); + final Set rangerReadObject = objSet(volume()); final Set expectedResolvedRanger = new LinkedHashSet<>(); expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedRanger.add(new OzoneGrant(rangerReadObjects, acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(rangerListObject, acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(rangerReadObject, acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1511,15 +1606,15 @@ public void testIgnoresUnsupportedActionsWhenSupportedActionsAreIncluded() throw " \"Effect\": \"Allow\",\n" + " \"Action\": [\n" + " \"s3:GetAccelerateConfiguration\",\n" + // unsupported action - " \"s3:GetBucketAcl\",\n" + + " \"s3:GetBucketAcl\",\n" + // ignored because it doesn't support s3:prefix condition " \"s3:GetObject\",\n" + // object-level action not applied for bucket " \"s3:GetObjectAcl\",\n" + // unsupported action " \"s3:ListBucket\",\n" + - " \"s3:ListBucketMultipartUploads\"\n" + + " \"s3:ListBucketMultipartUploads\"\n" + // ignored because it doesn't support s3:prefix condition " ],\n" + " \"Resource\": \"arn:aws:s3:::bucket1\",\n" + " \"Condition\": {\n" + - " \"StringEquals\": {\n" + + " \"StringLike\": {\n" + " \"s3:prefix\": [ \"team/folder\", \"team/folder/*\" ]\n" + " }\n" + " }\n" + @@ -1533,19 +1628,23 @@ public void testIgnoresUnsupportedActionsWhenSupportedActionsAreIncluded() throw // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: READ, LIST, READ_ACL bucket acls; volume and prefixes "team/folder", "team/folder/" READ + // Expected for native: READ, LIST bucket acls; volume READ; + // prefixes "team/folder", "team/folder/" LIST final Set bucketSet = objSet(bucket("bucket1")); - final Set bucketAcls = acls(READ, LIST, READ_ACL); + final Set bucketAcls = acls(READ, LIST); expectedResolvedNative.add(new 
OzoneGrant(bucketSet, bucketAcls)); expectedResolvedNative.add(new OzoneGrant( - objSet(volume(), prefix("bucket1", "team/folder"), prefix("bucket1", "team/folder/")), acls(READ))); + objSet(prefix("bucket1", "team/folder"), prefix("bucket1", "team/folder/")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: READ, LIST, READ_ACL bucket acls; volume and keys "team/folder" and "team/folder/*" READ + // Expected for Ranger: READ, LIST bucket acls; volume READ; + // keys "team/folder" and "team/folder/*" LIST expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); expectedResolvedRanger.add(new OzoneGrant( - objSet(volume(), key("bucket1", "team/folder"), key("bucket1", "team/folder/*")), acls(READ))); + objSet(key("bucket1", "team/folder"), key("bucket1", "team/folder/*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1563,20 +1662,59 @@ public void testMultiplePrefixesWithWildcards() throws OMException { final Set resolvedFromNativeAuthorizer = resolve(json, VOLUME, NATIVE); final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); - // Ensure what we got is what we expected + // s3:prefix conditions do not apply to object actions like s3:GetObject. 
+ assertThat(resolvedFromNativeAuthorizer).isEmpty(); + assertThat(resolvedFromRangerAuthorizer).isEmpty(); + } + + @Test + public void testListAndGetWithPrefixConditionSkipsObjectAction() throws OMException { + final String json = "{\n" + + " \"Statement\": [{\n" + + " \"Effect\": \"Allow\",\n" + + " \"Action\": [\"s3:ListBucket\", \"s3:GetObject\"],\n" + + " \"Resource\": [\"arn:aws:s3:::logs\", \"arn:aws:s3:::logs/*\"],\n" + + " \"Condition\": { \"StringLike\": { \"s3:prefix\": \"team/*\" } }\n" + + " }]\n" + + "}"; + + final Set resolvedFromNativeAuthorizer = resolve(json, VOLUME, NATIVE); + final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); + + // Expected for native (GetObject is ignored because s3:prefix is present): READ, LIST bucket acls; volume READ; + // prefix "team/" under bucket "logs" LIST final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: READ acl on prefix "" (condition prefixes are ignored); bucket READ; volume READ; - final Set readObjectsNative = objSet(prefix("logs", ""), bucket("logs"), volume()); - expectedResolvedNative.add(new OzoneGrant(readObjectsNative, acls(READ))); + expectedResolvedNative.add(new OzoneGrant(objSet(bucket("logs")), acls(READ, LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(prefix("logs", "team/")), acls(LIST))); + expectedResolvedNative.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); + // Expected for Ranger (GetObject is ignored because s3:prefix is present): READ, LIST bucket acls; volume READ; + // key "team/*" under bucket "logs" LIST final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: READ acl on key "*" (condition prefixes are ignored) - final Set keySet = objSet(key("logs", "*"), bucket("logs"), volume()); - expectedResolvedRanger.add(new OzoneGrant(keySet, acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(bucket("logs")), acls(READ, LIST))); + 
expectedResolvedRanger.add(new OzoneGrant(objSet(key("logs", "team/*")), acls(LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } + @Test + public void testListBucketOnObjectResourceReturnsEmpty() throws OMException { + final String json = "{\n" + + " \"Statement\": [{\n" + + " \"Effect\": \"Allow\",\n" + + " \"Action\": \"s3:ListBucket\",\n" + + " \"Resource\": \"arn:aws:s3:::logs/*\"\n" + + " }]\n" + + "}"; + + final Set resolvedFromNativeAuthorizer = resolve(json, VOLUME, NATIVE); + final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); + + assertThat(resolvedFromNativeAuthorizer).isEmpty(); + assertThat(resolvedFromRangerAuthorizer).isEmpty(); + } + @Test public void testObjectResourceWithWildcardInMiddle() throws OMException { final String json = "{\n" + @@ -1647,10 +1785,10 @@ public void testBucketActionOnAllResources() throws OMException { final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); // Ensure what we got is what we expected final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: READ and LIST on volume and bucket (wildcard), READ on key "*" + // Expected for Ranger: READ and LIST on volume and bucket (wildcard), LIST on key "*" final Set resourceSet = objSet(volume(), bucket("*")); expectedResolvedRanger.add(new OzoneGrant(resourceSet, acls(READ, LIST))); - expectedResolvedRanger.add(new OzoneGrant(objSet(key("*", "*")), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(key("*", "*")), acls(LIST))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1678,6 +1816,36 @@ public void testObjectActionOnAllResources() throws OMException { assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } + @Test + public void testAllActionsOnAllResourcesWithPrefixCondition() throws OMException { + final String json = "{\n" + + 
" \"Statement\": [{\n" + + " \"Effect\": \"Allow\",\n" + + " \"Action\": \"s3:*\",\n" + + " \"Resource\": \"*\",\n" + + " \"Condition\": {\n" + + " \"StringLike\": {\n" + + " \"s3:prefix\": [ \"team/folder\", \"team/folder/*\" ]\n" + + " }\n" + + " }\n" + + " }]\n" + + "}"; + + // Wildcards on bucket are not supported for Native authorizer + expectBucketWildcardUnsupportedExceptionForNativeAuthorizer(json); + + final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); + // Ensure what we got is what we expected + final Set expectedResolvedRanger = new LinkedHashSet<>(); + // Expected for Ranger: (only ListBucket supports s3:prefix) READ volume; READ, LIST acl on bucket; + // LIST on key "team/folder", "team/folder/*" + expectedResolvedRanger.add(new OzoneGrant(objSet(bucket("*")), acls(READ, LIST))); + expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ))); + expectedResolvedRanger.add( + new OzoneGrant(objSet(key("*", "team/folder"), key("*", "team/folder/*")), acls(LIST))); + assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); + } + @Test public void testAllActionsOnAllResources() throws OMException { final String json = "{\n" + @@ -1717,11 +1885,12 @@ public void testAllActionsOnAllBucketResources() throws OMException { final Set resolvedFromRangerAuthorizer = resolve(json, VOLUME, RANGER); // Ensure what we got is what we expected final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: ALL bucket acls on wildcard pattern, volume READ, key "*" READ + // Expected for Ranger: ALL bucket acls on wildcard pattern, volume READ and LIST (because of ListAllMyBuckets), + // key "*" LIST final Set bucketSet = objSet(bucket("*")); final Set bucketAcls = acls(ALL); expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); - expectedResolvedRanger.add(new OzoneGrant(objSet(key("*", "*")), acls(READ))); + expectedResolvedRanger.add(new OzoneGrant(objSet(key("*", "*")), acls(LIST))); 
expectedResolvedRanger.add(new OzoneGrant(objSet(volume()), acls(READ, LIST))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } @@ -1804,21 +1973,20 @@ public void testWildcardActionGroupListStar() throws OMException { // Ensure what we got is what we expected final Set expectedResolvedNative = new LinkedHashSet<>(); - // Expected for native: READ, LIST bucket acls - final Set bucketSet = objSet(bucket("my-bucket")); - final Set bucketAcls = acls(READ, LIST); - expectedResolvedNative.add(new OzoneGrant(bucketSet, bucketAcls)); - // Expected for native: READ acl on prefix "" under bucket; volume READ - final Set readObjectsNative = objSet(prefix("my-bucket", ""), volume()); - expectedResolvedNative.add(new OzoneGrant(readObjectsNative, acls(READ))); + // Expected for native: READ, LIST bucket acls, READ and LIST acl on prefix "" under bucket; volume READ + final Set readAndListsObjectsNative = objSet(bucket("my-bucket"), prefix("my-bucket", "")); + final Set readObjectNative = objSet(volume()); + expectedResolvedNative.add(new OzoneGrant(readAndListsObjectsNative, acls(READ, LIST))); + expectedResolvedNative.add(new OzoneGrant(readObjectNative, acls(READ))); assertThat(resolvedFromNativeAuthorizer).isEqualTo(expectedResolvedNative); final Set expectedResolvedRanger = new LinkedHashSet<>(); - // Expected for Ranger: READ, LIST bucket acls - expectedResolvedRanger.add(new OzoneGrant(bucketSet, bucketAcls)); - // Expected for Ranger: READ key acl for resource type KEY with key name "*"; volume READ - final Set readObjectsRanger = objSet(key("my-bucket", "*"), volume()); - expectedResolvedRanger.add(new OzoneGrant(readObjectsRanger, acls(READ))); + // Expected for Ranger: READ, LIST bucket acls; READ and LIST key acl for resource type KEY with key name "*"; + // volume READ + final Set readAndListObjectsRanger = objSet(bucket("my-bucket"), key("my-bucket", "*")); + final Set readObjectRanger = objSet(volume()); + 
expectedResolvedRanger.add(new OzoneGrant(readAndListObjectsRanger, acls(READ, LIST))); + expectedResolvedRanger.add(new OzoneGrant(readObjectRanger, acls(READ))); assertThat(resolvedFromRangerAuthorizer).isEqualTo(expectedResolvedRanger); } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index a6f797dd9739..9bb0d801ee7b 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1307,6 +1307,9 @@ message ListStatusRequest { required string startKey = 3; required uint64 numEntries = 4; optional bool allowPartialPrefix = 5; + // When keyArgs.keyName is empty (root listing), this is the original S3/list + // prefix for STS auth. Enables LIST check on this prefix instead of "*". + optional string listPrefix = 6; } message ListStatusResponse { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java index 8ce694993780..be28e3312cc2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java @@ -234,8 +234,24 @@ public List listStatus(OmKeyArgs args, boolean recursive, try { if (isAclEnabled) { - checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - bucket, args.getKeyName()); + if (isStsS3Request()) { + // We need to be able to tell the difference between being able to download a file and merely seeing the file + // name in a list. Use READ for download ability and LIST (here) for listing. + // When keyName is empty (root listing), use listPrefix for auth if set (e.g. from S3 shallow list with + // prefix). Otherwise fall back to "*" which requires full bucket LIST permission. 
+ final String aclKey; + if (args.getKeyName() != null && !args.getKeyName().isEmpty()) { + aclKey = args.getKeyName(); + } else if (args.getListPrefix() != null && !args.getListPrefix().isEmpty()) { + aclKey = args.getListPrefix(); + } else { + aclKey = "*"; + } + checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.LIST, bucket.realVolume(), bucket.realBucket(), aclKey); + } else { + checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, + bucket, args.getKeyName()); + } } metrics.incNumListStatus(); return keyManager.listStatus(args, recursive, startKey, @@ -277,8 +293,12 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { try { if (isAclEnabled) { - checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, - bucket, args.getKeyName()); + if (isStsS3Request()) { + checkAcls(getResourceType(args), StoreType.OZONE, ACLType.LIST, bucket, args.getKeyName()); + } else { + checkAcls(getResourceType(args), StoreType.OZONE, ACLType.READ, + bucket, args.getKeyName()); + } } metrics.incNumGetFileStatus(); return keyManager.getFileStatus(args, getClientAddress()); @@ -347,6 +367,14 @@ public ListKeysResult listKeys(String volumeName, String bucketName, checkAcls(ResourceType.BUCKET, StoreType.OZONE, ACLType.LIST, bucket.realVolume(), bucket.realBucket(), keyPrefix) ); + + if (isStsS3Request()) { + // With STS we must check acl on the prefix to be compliant with AWS + final String aclKey = (keyPrefix == null || keyPrefix.isEmpty()) ? 
"*" : keyPrefix; + captureLatencyNs( + perfMetrics.getListKeysAclCheckLatencyNs(), () -> checkAcls( + ResourceType.KEY, StoreType.OZONE, ACLType.LIST, bucket.realVolume(), bucket.realBucket(), aclKey)); + } } metrics.incNumKeyLists(); return keyManager.listKeys(bucket.realVolume(), bucket.realBucket(), @@ -698,6 +726,10 @@ public boolean isNativeAuthorizerEnabled() { return accessAuthorizer.isNative(); } + private boolean isStsS3Request() { + return getS3Auth() != null && OzoneManager.getStsTokenIdentifier() != null; + } + private ResourceType getResourceType(OmKeyArgs args) { if (args.getKeyName() == null || args.getKeyName().isEmpty()) { return ResourceType.BUCKET; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index bea7785bfbc2..8efc514a9d6a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -1250,14 +1250,17 @@ private ListStatusResponse listStatus( private ListStatusLightResponse listStatusLight( ListStatusRequest request, int clientVersion) throws IOException { KeyArgs keyArgs = request.getKeyArgs(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + OmKeyArgs.Builder omKeyArgsBuilder = new OmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) .setSortDatanodesInPipeline(false) .setLatestVersionLocation(true) - .setHeadOp(keyArgs.getHeadOp()) - .build(); + .setHeadOp(keyArgs.getHeadOp()); + if (request.hasListPrefix() && !request.getListPrefix().isEmpty()) { + omKeyArgsBuilder.setListPrefix(request.getListPrefix()); + } + OmKeyArgs omKeyArgs = omKeyArgsBuilder.build(); boolean allowPartialPrefixes = 
request.hasAllowPartialPrefix() && request.getAllowPartialPrefix(); List statuses = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java index 44d8b63b973f..f20e580476c5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/STSSecurityUtil.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.security; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_TOKEN; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.InvalidProtocolBufferException; @@ -71,7 +72,7 @@ public static STSTokenIdentifier constructValidateAndDecryptSTSToken(String sess * @throws SecretManager.InvalidToken if the token is invalid */ private static STSTokenIdentifier verifyAndDecryptToken(Token token, - SecretKeyClient secretKeyClient, Clock clock) throws SecretManager.InvalidToken { + SecretKeyClient secretKeyClient, Clock clock) throws SecretManager.InvalidToken, OMException { if (!STSTokenIdentifier.KIND_NAME.equals(token.getKind())) { throw new SecretManager.InvalidToken("Invalid STS token - kind is incorrect: " + token.getKind()); } @@ -109,7 +110,7 @@ private static STSTokenIdentifier verifyAndDecryptToken(Token ipcServerStaticMock = mockStatic(Server.class); - MockedStatic grpcRequestContextStaticMock = mockStatic(Context.class); - ) { + public void testGetClientAddress() throws Exception { + try (MockedStatic ipcServerStaticMock = mockStatic(Server.class)) { // given String expectedClientAddressInCaseOfHadoopRpcCall = "hadoop.ipc.client.com"; @@ -66,15 +90,11 @@ public void testGetClientAddress() { .thenReturn(null, null, 
expectedClientAddressInCaseOfHadoopRpcCall); String expectedClientAddressInCaseOfGrpcCall = "172.45.23.4"; - Context.Key clientIpAddressKey = mock(Context.Key.class); - when(clientIpAddressKey.get()) - .thenReturn(expectedClientAddressInCaseOfGrpcCall, null); - - grpcRequestContextStaticMock.when(() -> Context.key("CLIENT_IP_ADDRESS")) - .thenReturn(clientIpAddressKey); - // when (GRPC call with defined client address) - String clientAddress = OmMetadataReader.getClientAddress(); + String clientAddress = Context.current() + .withValue(GrpcClientConstants.CLIENT_IP_ADDRESS_CTX_KEY, + expectedClientAddressInCaseOfGrpcCall) + .call(OmMetadataReader::getClientAddress); // then assertEquals(expectedClientAddressInCaseOfGrpcCall, clientAddress); @@ -93,12 +113,12 @@ public void testGetClientAddress() { @Test public void testCheckAclsAttachesSessionPolicyFromThreadLocal() throws Exception { final String sessionPolicy = "session-policy-from-thread-local"; - setupStsTokenIdentifier(sessionPolicy); + setupStsTokenIdentifier(); final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer); - final RequestContext contextWithoutSessionPolicy = createTestRequestContext(null); + final RequestContext contextWithoutSessionPolicy = createTestRequestContext(); final OzoneObj obj = createTestOzoneObj(); assertTrue(omMetadataReader.checkAcls(obj, contextWithoutSessionPolicy, true)); @@ -114,7 +134,7 @@ public void testNoSessionPolicyWhenThreadLocalIsNull() throws Exception { final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer); - final RequestContext contextWithoutSessionPolicy = createTestRequestContext(null); + final RequestContext contextWithoutSessionPolicy = createTestRequestContext(); final OzoneObj obj = createTestOzoneObj(); 
assertTrue(omMetadataReader.checkAcls(obj, contextWithoutSessionPolicy, true)); @@ -122,25 +142,222 @@ public void testNoSessionPolicyWhenThreadLocalIsNull() throws Exception { verifySessionPolicyPassedToAuthorizer(accessAuthorizer, obj, null); } - private OmMetadataReader createMetadataReader(IAccessAuthorizer accessAuthorizer) { + @Test + public void testListStatusUsesListAclForStsS3Request() throws Exception { + setupStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager keyManager = createListStatusKeyManagerReturningEmpty(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + final OmKeyArgs args = createOmKeyArgs(); + + final List statuses = omMetadataReader.listStatus(args, false, "", MAX_KEYS, false); + assertTrue(statuses.isEmpty()); + + final List checks = captureAclChecks(accessAuthorizer, 2); + + // For STS S3 requests, listStatus() performs these checks: + // 1. 
Volume READ (for volume access) + // 2) Key LIST (for the specific prefix being listed) - we need LIST permission for STS in order to tell whether the + // file should be listed only or downloadable (downloadable would be READ) + assertContainsVolumeReadCheck(checks); + assertContainsKeyListCheckWithName(checks, KEY_PREFIX); + } + + @Test + public void testListStatusUsesReadAclForNonStsRequest() throws Exception { + setupNonStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager keyManager = createListStatusKeyManagerReturningEmpty(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + final OmKeyArgs args = createOmKeyArgs(); + + final List statuses = omMetadataReader.listStatus(args, false, "", MAX_KEYS, false); + assertTrue(statuses.isEmpty()); + + final List checks = captureAclChecks(accessAuthorizer, 2); + assertTrue(checks.stream().allMatch(check -> check.getContext().getAclRights() == READ)); + + assertContainsVolumeReadCheck(checks); + // We want to ensure the current behavior for non-STS requests remains the same + assertContainsKeyReadCheckWithName(checks); + assertDoesNotContainKeyListCheck(checks); + } + + @Test + public void testListStatusUsesListPrefixForAclWhenKeyNameEmptyAndListPrefixSet() throws Exception { + setupStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager keyManager = createListStatusKeyManagerReturningEmpty(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + final OmKeyArgs args = new OmKeyArgs.Builder() + .setVolumeName(VOLUME_NAME) + .setBucketName(BUCKET_NAME) + .setKeyName("") + .setListPrefix("userA/") + .build(); + + final List statuses = omMetadataReader.listStatus(args, false, "", MAX_KEYS, false); + assertTrue(statuses.isEmpty()); + + final List checks = captureAclChecks(accessAuthorizer, 
2); + assertContainsVolumeReadCheck(checks); + assertContainsKeyListCheckWithName(checks, "userA/"); + } + + @Test + public void testListStatusUsesWildcardForAclWhenKeyNameAndListPrefixEmpty() throws Exception { + setupStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager keyManager = createListStatusKeyManagerReturningEmpty(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + final OmKeyArgs args = new OmKeyArgs.Builder() + .setVolumeName(VOLUME_NAME) + .setBucketName(BUCKET_NAME) + .setKeyName("") + .build(); + + final List statuses = omMetadataReader.listStatus(args, false, "", MAX_KEYS, false); + assertTrue(statuses.isEmpty()); + + final List checks = captureAclChecks(accessAuthorizer, 2); + assertContainsVolumeReadCheck(checks); + assertContainsKeyListCheckWithName(checks, "*"); + } + + @Test + public void testListStatusKeyNameTakesPrecedenceOverListPrefix() throws Exception { + setupStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager keyManager = createListStatusKeyManagerReturningEmpty(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + final OmKeyArgs args = new OmKeyArgs.Builder() + .setVolumeName(VOLUME_NAME) + .setBucketName(BUCKET_NAME) + .setKeyName(KEY_PREFIX) + .setListPrefix("other/") + .build(); + + final List statuses = omMetadataReader.listStatus(args, false, "", MAX_KEYS, false); + assertTrue(statuses.isEmpty()); + + final List checks = captureAclChecks(accessAuthorizer, 2); + assertContainsVolumeReadCheck(checks); + assertContainsKeyListCheckWithName(checks, KEY_PREFIX); + } + + @Test + public void testGetFileStatusUsesListAclForStsS3Request() throws Exception { + setupStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager 
keyManager = createGetFileStatusKeyManagerReturningStatus(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + final OmKeyArgs args = createOmKeyArgs(); + + omMetadataReader.getFileStatus(args); + + final List checks = captureAclChecks(accessAuthorizer, 2); + assertContainsVolumeReadCheck(checks); + assertContainsKeyListCheckWithName(checks, KEY_PREFIX); + assertDoesNotContainKeyReadCheck(checks); + } + + @Test + public void testGetFileStatusUsesReadAclForNonStsS3Request() throws Exception { + setupNonStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager keyManager = createGetFileStatusKeyManagerReturningStatus(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + final OmKeyArgs args = createOmKeyArgs(); + + omMetadataReader.getFileStatus(args); + + final List checks = captureAclChecks(accessAuthorizer, 2); + assertContainsVolumeReadCheck(checks); + assertContainsKeyReadCheckWithName(checks); + assertDoesNotContainKeyListCheck(checks); + } + + @Test + public void testListKeysUsesPrefixCheckForStsS3Request() throws Exception { + setupStsS3Request(); + + final IAccessAuthorizer accessAuthorizer = createMockIAccessAuthorizerReturningTrue(); + final KeyManager keyManager = createListKeysKeyManagerReturningEmpty(); + + final OmMetadataReader omMetadataReader = createMetadataReader(accessAuthorizer, keyManager); + + // Case 1: List with prefix "userA/" + omMetadataReader.listKeys(VOLUME_NAME, BUCKET_NAME, "", "userA/", (int) MAX_KEYS); + + List checks = captureAclChecks(accessAuthorizer, 4); + assertContainsBucketListCheck(checks); + assertContainsKeyListCheckWithName(checks, "userA/"); + + // Reset to make case 2 assertions independent of case 1 captures. 
+ reset(accessAuthorizer); + reenableAllowAllAccessChecks(accessAuthorizer); + + // Case 2: List with empty prefix (should check "*") + omMetadataReader.listKeys(VOLUME_NAME, BUCKET_NAME, "", "", (int) MAX_KEYS); + + checks = captureAclChecks(accessAuthorizer, 4); + assertContainsBucketListCheck(checks); + assertContainsKeyListCheckWithName(checks, "*"); + } + + private OmMetadataReader createMetadataReader(IAccessAuthorizer accessAuthorizer) throws IOException { + return createMetadataReader(accessAuthorizer, mock(KeyManager.class)); + } + + private OmMetadataReader createMetadataReader(IAccessAuthorizer accessAuthorizer, KeyManager keyManager) + throws IOException { final OzoneManager ozoneManager = mock(OzoneManager.class); when(ozoneManager.getBucketManager()).thenReturn(mock(BucketManager.class)); when(ozoneManager.getVolumeManager()).thenReturn(mock(VolumeManager.class)); + when(ozoneManager.getConfiguration()).thenReturn(new OzoneConfiguration()); when(ozoneManager.getAclsEnabled()).thenReturn(true); - when(ozoneManager.getPerfMetrics()).thenReturn(mock(OMPerformanceMetrics.class)); + final OMPerformanceMetrics perfMetrics = mock(OMPerformanceMetrics.class); + // OmMetadataReader uses these MutableRate metrics via MetricUtil.captureLatencyNs(...). 
+ when(perfMetrics.getListKeysResolveBucketLatencyNs()).thenReturn(mock(MutableRate.class)); + when(perfMetrics.getListKeysAclCheckLatencyNs()).thenReturn(mock(MutableRate.class)); + when(ozoneManager.getPerfMetrics()).thenReturn(perfMetrics); + when(ozoneManager.getVolumeOwner(any(), any(), any())).thenReturn("volume-owner"); + when(ozoneManager.getBucketOwner(any(), any(), any(), any())).thenReturn("bucket-owner"); + when(ozoneManager.getOmRpcServerAddr()).thenReturn(new InetSocketAddress("127.0.0.1", 9874)); + when(ozoneManager.resolveBucketLink(any(Pair.class))) + .thenReturn( + new ResolvedBucket( + VOLUME_NAME, BUCKET_NAME, VOLUME_NAME, BUCKET_NAME, "bucket-owner", FILE_SYSTEM_OPTIMIZED)); + when(ozoneManager.resolveBucketLink(any(OmKeyArgs.class))) + .thenReturn( + new ResolvedBucket( + VOLUME_NAME, BUCKET_NAME, VOLUME_NAME, BUCKET_NAME, "bucket-owner", FILE_SYSTEM_OPTIMIZED)); return new OmMetadataReader( - mock(KeyManager.class), mock(PrefixManager.class), ozoneManager, mock(Logger.class), mock(AuditLogger.class), + keyManager, mock(PrefixManager.class), ozoneManager, mock(Logger.class), mock(AuditLogger.class), mock(OmMetadataReaderMetrics.class), accessAuthorizer); } /** - * Creates and sets a mock STSTokenIdentifier with the given session policy in the thread-local. - * @param sessionPolicy the session policy to return, or null + * Creates and sets a mock STSTokenIdentifier with a session policy in the thread-local. 
*/ - private void setupStsTokenIdentifier(String sessionPolicy) { + private void setupStsTokenIdentifier() { final STSTokenIdentifier stsTokenIdentifier = mock(STSTokenIdentifier.class); - when(stsTokenIdentifier.getSessionPolicy()).thenReturn(sessionPolicy); + when(stsTokenIdentifier.getSessionPolicy()).thenReturn("session-policy-from-thread-local"); OzoneManager.setStsTokenIdentifier(stsTokenIdentifier); } @@ -156,23 +373,19 @@ private IAccessAuthorizer createMockIAccessAuthorizerReturningTrue() throws OMEx } /** - * Creates a test RequestContext with the given session policy. - * @param sessionPolicy the session policy to set, or null + * Creates a test RequestContext. + * * @return the constructed RequestContext */ - private RequestContext createTestRequestContext(String sessionPolicy) { + private RequestContext createTestRequestContext() { RequestContext.Builder builder = RequestContext.newBuilder() .setClientUgi(UserGroupInformation.createRemoteUser("testUser")) .setIp(InetAddress.getLoopbackAddress()) .setHost("localhost") .setAclType(IAccessAuthorizer.ACLIdentityType.USER) - .setAclRights(IAccessAuthorizer.ACLType.READ) + .setAclRights(READ) .setOwnerName("owner"); - if (sessionPolicy != null) { - builder.setSessionPolicy(sessionPolicy); - } - return builder.build(); } @@ -182,19 +395,63 @@ private RequestContext createTestRequestContext(String sessionPolicy) { */ private OzoneObj createTestOzoneObj() { return OzoneObjInfo.Builder.newBuilder() - .setResType(OzoneObj.ResourceType.KEY) + .setResType(KEY) .setStoreType(OzoneObj.StoreType.OZONE) - .setVolumeName("vol") - .setBucketName("bucket") + .setVolumeName(VOLUME_NAME) + .setBucketName(BUCKET_NAME) .setKeyName("key") .build(); } + private void setupStsS3Request() { + OzoneManager.setStsTokenIdentifier(mock(STSTokenIdentifier.class)); + OzoneManager.setS3Auth(S3Authentication.newBuilder().setAccessId(TestOMMetadataReader.ACCESS_KEY_ID).build()); + } + + private void setupNonStsS3Request() { + 
OzoneManager.setStsTokenIdentifier(null); + OzoneManager.setS3Auth(null); + } + + private OmKeyArgs createOmKeyArgs() { + return new OmKeyArgs.Builder() + .setVolumeName(VOLUME_NAME) + .setBucketName(BUCKET_NAME) + .setKeyName(TestOMMetadataReader.KEY_PREFIX) + .build(); + } + + private KeyManager createListStatusKeyManagerReturningEmpty() throws IOException { + final KeyManager keyManager = mock(KeyManager.class); + when(keyManager.listStatus(any(OmKeyArgs.class), eq(false), eq(""), eq(MAX_KEYS), any(), eq(false))) + .thenReturn(Collections.emptyList()); + return keyManager; + } + + private KeyManager createGetFileStatusKeyManagerReturningStatus() throws IOException { + final KeyManager keyManager = mock(KeyManager.class); + when(keyManager.getFileStatus(any(OmKeyArgs.class), any())) + .thenReturn(mock(OzoneFileStatus.class)); + return keyManager; + } + + private KeyManager createListKeysKeyManagerReturningEmpty() throws IOException { + final KeyManager keyManager = mock(KeyManager.class); + when(keyManager.listKeys(any(), any(), any(), any(), eq(100))) + .thenReturn(new ListKeysResult(Collections.emptyList(), false)); + return keyManager; + } + + private void reenableAllowAllAccessChecks(IAccessAuthorizer accessAuthorizer) throws OMException { + when(accessAuthorizer.checkAccess(any(OzoneObj.class), any(RequestContext.class))) + .thenReturn(true); + } + /** * Verifies that the accessAuthorizer received a call to checkAccess with the expected session policy. 
* @param accessAuthorizer the mock authorizer to verify * @param expectedObj the expected OzoneObj - * @param expectedSessionPolicy the expected session policy (may be null) + * @param expectedSessionPolicy the expected session policy (could be null) */ private void verifySessionPolicyPassedToAuthorizer(IAccessAuthorizer accessAuthorizer, OzoneObj expectedObj, String expectedSessionPolicy) throws OMException { @@ -202,4 +459,85 @@ private void verifySessionPolicyPassedToAuthorizer(IAccessAuthorizer accessAutho verify(accessAuthorizer).checkAccess(eq(expectedObj), captor.capture()); assertEquals(expectedSessionPolicy, captor.getValue().getSessionPolicy()); } + + private List captureAclChecks(IAccessAuthorizer accessAuthorizer, int expectedCheckCount) + throws OMException { + final ArgumentCaptor objCaptor = ArgumentCaptor.forClass(OzoneObj.class); + final ArgumentCaptor ctxCaptor = ArgumentCaptor.forClass(RequestContext.class); + verify(accessAuthorizer, times(expectedCheckCount)).checkAccess(objCaptor.capture(), ctxCaptor.capture()); + return toAclChecks(objCaptor.getAllValues(), ctxCaptor.getAllValues()); + } + + private List toAclChecks(List objs, List contexts) { + assertEquals(objs.size(), contexts.size(), "Captured ACL objects and contexts should align"); + final List checks = new ArrayList<>(); + for (int i = 0; i < objs.size(); i++) { + checks.add(new AclCheck(objs.get(i), contexts.get(i))); + } + return checks; + } + + private void assertContainsVolumeReadCheck(List checks) { + assertTrue(checks.stream().anyMatch(this::isVolumeReadCheck), "Expected a VOLUME READ ACL check"); + } + + private boolean isVolumeReadCheck(AclCheck check) { + return check.getObj().getResourceType() == VOLUME && check.getContext().getAclRights() == READ; + } + + private void assertContainsBucketListCheck(List checks) { + assertTrue( + checks.stream().anyMatch( + check -> check.getObj().getResourceType() == OzoneObj.ResourceType.BUCKET && + check.getContext().getAclRights() == 
LIST), + "Expected a BUCKET LIST ACL check"); + } + + private void assertContainsKeyListCheckWithName(List checks, String keyName) { + assertTrue( + checks.stream().anyMatch( + check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == LIST && + keyName.equals(check.getObj().getKeyName())), + "Expected a KEY LIST ACL check for key '" + keyName + "'"); + } + + private void assertContainsKeyReadCheckWithName(List checks) { + assertTrue( + checks.stream().anyMatch( + check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == READ && + TestOMMetadataReader.KEY_PREFIX.equals(check.getObj().getKeyName())), + "Expected a KEY READ ACL check for key '" + TestOMMetadataReader.KEY_PREFIX + "'"); + } + + private void assertDoesNotContainKeyReadCheck(List checks) { + assertFalse( + checks.stream().anyMatch( + check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == READ), + "Did not expect a KEY READ ACL check"); + } + + private void assertDoesNotContainKeyListCheck(List checks) { + assertFalse( + checks.stream().anyMatch( + check -> check.getObj().getResourceType() == KEY && check.getContext().getAclRights() == LIST), + "Did not expect a KEY LIST ACL check"); + } + + private static final class AclCheck { + private final OzoneObj obj; + private final RequestContext context; + + private AclCheck(OzoneObj obj, RequestContext context) { + this.obj = obj; + this.context = context; + } + + private OzoneObj getObj() { + return obj; + } + + private RequestContext getContext() { + return context; + } + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java index 6cf19b182eee..995adaaf4ab8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestSTSSecurityUtil.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.security; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.ArgumentMatchers.any; @@ -179,7 +180,8 @@ public void testConstructValidateAndDecryptSTSTokenExpired() throws Exception { assertThatThrownBy(() -> STSSecurityUtil.constructValidateAndDecryptSTSToken(tokenString, secretKeyClient, clock)) .isInstanceOf(OMException.class) - .hasMessageContaining("Invalid STS token format: Invalid STS token - token expired at"); + .satisfies(exception -> assertThat(((OMException) exception).getResult()).isEqualTo(TOKEN_EXPIRED)) + .hasMessageContaining("Invalid STS token - token expired at"); } @Test diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index bebe6d1d07ba..7278c9e5315f 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -30,6 +30,10 @@ + + com.fasterxml.jackson.core + jackson-annotations + com.fasterxml.jackson.core jackson-databind diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java index 1ac30f49797c..21d00cd419bb 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketAclHandler.java @@ -113,6 +113,8 @@ Response handleGetRequest(S3RequestContext context, String bucketName) auditReadFailure(context.getAction(), ex); if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex); + } else if (isExpiredToken(ex)) { + throw 
newError(S3ErrorTable.EXPIRED_TOKEN, bucketName, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex); } else { @@ -232,6 +234,8 @@ Response handlePutRequest(S3RequestContext context, String bucketName, InputStre auditWriteFailure(context.getAction(), exception); if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, exception); + } else if (isExpiredToken(exception)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, bucketName, exception); } else if (isAccessDenied(exception)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, exception); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java index 982838d0dd04..81c9b2836a30 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketCrudHandler.java @@ -111,6 +111,8 @@ Response handleDeleteRequest(S3RequestContext context, String bucketName) throw newError(S3ErrorTable.BUCKET_NOT_EMPTY, bucketName, ex); } else if (ex.getResult() == OMException.ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, bucketName, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex); } else { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 18ba9f34934f..85041687fdfa 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -163,7 +163,9 @@ public Response get( } catch (OMException ex) { auditReadFailure(s3GAction, ex); getMetrics().updateGetBucketFailureStats(startNanos); - if (isAccessDenied(ex)) { + if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, bucketName, ex); + } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex); } else if (ex.getResult() == ResultCodes.FILE_NOT_FOUND) { // File not found, continue and send normal response with 0 keyCount @@ -362,7 +364,9 @@ public Response listMultipartUploads( } catch (OMException exception) { auditReadFailure(s3GAction, exception); getMetrics().updateListMultipartUploadsFailureStats(startNanos); - if (isAccessDenied(exception)) { + if (isExpiredToken(exception)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, prefix, exception); + } else if (isAccessDenied(exception)) { throw newError(S3ErrorTable.ACCESS_DENIED, prefix, exception); } throw exception; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 3d7f70d06c35..20ad21e23f11 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -194,6 +194,8 @@ protected OzoneBucket getBucket(OzoneVolume volume, String bucketName) } catch (OMException ex) { if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, s3Auth.getAccessID(), ex); } else if (ex.getResult() == ResultCodes.INVALID_TOKEN) { throw newError(S3ErrorTable.ACCESS_DENIED, s3Auth.getAccessID(), ex); @@ -259,6 +261,8 @@ protected OzoneBucket 
getBucket(String bucketName) if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND || ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, s3Auth.getAccessID(), ex); } else if (ex.getResult() == ResultCodes.INVALID_TOKEN) { throw newError(S3ErrorTable.ACCESS_DENIED, s3Auth.getAccessID(), ex); @@ -294,6 +298,8 @@ protected String createS3Bucket(String bucketName) throws getMetrics().updateCreateBucketFailureStats(startNanos); if (ex.getResult() == ResultCodes.PERMISSION_DENIED) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, s3Auth.getAccessID(), ex); } else if (ex.getResult() == ResultCodes.INVALID_TOKEN) { throw newError(S3ErrorTable.ACCESS_DENIED, s3Auth.getAccessID(), ex); @@ -322,6 +328,8 @@ protected void deleteS3Bucket(String s3BucketName) if (ex.getResult() == ResultCodes.PERMISSION_DENIED) { throw newError(S3ErrorTable.ACCESS_DENIED, s3BucketName, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, s3Auth.getAccessID(), ex); } else if (ex.getResult() == ResultCodes.INVALID_TOKEN) { throw newError(S3ErrorTable.ACCESS_DENIED, s3Auth.getAccessID(), ex); @@ -371,22 +379,33 @@ private Iterator iterateBuckets( OzoneVolume volume = getVolume(); ownerSetter.accept(volume); return query.apply(volume); - } catch (OMException e) { - if (e.getResult() == ResultCodes.VOLUME_NOT_FOUND) { - return Collections.emptyIterator(); - } else if (e.getResult() == ResultCodes.PERMISSION_DENIED) { - throw newError(S3ErrorTable.ACCESS_DENIED, - "listBuckets", e); - } else if (e.getResult() == ResultCodes.INVALID_TOKEN) { - throw newError(S3ErrorTable.ACCESS_DENIED, - s3Auth.getAccessID(), e); - } else if (e.getResult() == ResultCodes.TIMEOUT || - e.getResult() == ResultCodes.INTERNAL_ERROR) { - throw 
newError(S3ErrorTable.INTERNAL_ERROR, - "listBuckets", e); - } else { - throw e; + } catch (RuntimeException e) { + if (e.getCause() instanceof OMException) { + return handleOMException((OMException) e.getCause()); } + throw e; + } catch (OMException e) { + return handleOMException(e); + } + } + + private Iterator handleOMException(OMException e) throws OMException { + if (e.getResult() == ResultCodes.VOLUME_NOT_FOUND) { + return Collections.emptyIterator(); + } else if (e.getResult() == ResultCodes.PERMISSION_DENIED) { + throw newError(S3ErrorTable.ACCESS_DENIED, + "listBuckets", e); + } else if (isExpiredToken(e)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, s3Auth.getAccessID(), e); + } else if (e.getResult() == ResultCodes.INVALID_TOKEN) { + throw newError(S3ErrorTable.ACCESS_DENIED, + s3Auth.getAccessID(), e); + } else if (e.getResult() == ResultCodes.TIMEOUT || + e.getResult() == ResultCodes.INTERNAL_ERROR) { + throw newError(S3ErrorTable.INTERNAL_ERROR, + "listBuckets", e); + } else { + throw e; } } @@ -685,6 +704,10 @@ protected boolean isAccessDenied(OMException ex) { || result == ResultCodes.REVOKED_TOKEN; } + protected boolean isExpiredToken(OMException ex) { + return ex.getResult() == ResultCodes.TOKEN_EXPIRED; + } + protected ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket) throws OS3Exception { String storageType = getHeaders().getHeaderString(STORAGE_CLASS_HEADER); String storageConfig = getHeaders().getHeaderString(CUSTOM_METADATA_HEADER_PREFIX + STORAGE_CONFIG_HEADER); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java index 69edae429207..5d31728929fd 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java @@ -164,6 
+164,8 @@ private Response listParts(OzoneBucket ozoneBucket, String key, String uploadId, } catch (OMException ex) { if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { throw newError(NO_SUCH_UPLOAD, uploadId, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, bucketName + "/" + key + "/" + uploadId, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName + "/" + key + "/" + uploadId, ex); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index b18cf35d0d32..d97c514f9ae6 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -179,6 +179,8 @@ public Response put( " considered as Unix Paths. Path has Violated FS Semantics " + "which caused put operation to fail."); throw os3Exception; + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, keyPath, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex); } else if (ex.getResult() == ResultCodes.QUOTA_EXCEEDED) { @@ -375,6 +377,8 @@ public Response get( } catch (OMException ex) { if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_KEY, keyPath, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, keyPath, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex); } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { @@ -554,6 +558,8 @@ public Response head( if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) { // Just return 404 with no content return Response.status(Status.NOT_FOUND).build(); + } else if (isExpiredToken(ex)) { + throw 
newError(S3ErrorTable.EXPIRED_TOKEN, keyPath, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex); } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { @@ -640,6 +646,8 @@ public Response delete( // NOT_FOUND is not a problem, AWS doesn't throw exception for missing // keys. Just return 204 return Response.status(Status.NO_CONTENT).build(); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, keyPath, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex); } else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) { @@ -714,6 +722,9 @@ public Response initializeMultipartUpload( } catch (OMException ex) { auditWriteFailure(s3GAction, ex); getMetrics().updateInitMultipartUploadFailureStats(startNanos); + if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, key, ex); + } if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, key, ex); } @@ -794,6 +805,10 @@ public Response completeMultipartUpload( "considered as Unix Paths. 
A directory already exists with a " + "given KeyName caused failure for MPU"); throw os3Exception; + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, key, ex); + } else if (isAccessDenied(ex)) { + throw newError(S3ErrorTable.ACCESS_DENIED, key, ex); } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucket, ex); } @@ -959,6 +974,8 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, } if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { throw newError(NO_SUCH_UPLOAD, uploadID, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, bucketName + "/" + key, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, bucketName + "/" + key, ex); } else if (ex.getResult() == ResultCodes.INVALID_PART) { @@ -1114,6 +1131,8 @@ private CopyObjectResponse copyObject(OzoneVolume volume, throw newError(S3ErrorTable.NO_SUCH_KEY, sourceKey, ex); } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket, ex); + } else if (isExpiredToken(ex)) { + throw newError(S3ErrorTable.EXPIRED_TOKEN, destBucket + "/" + destkey, ex); } else if (isAccessDenied(ex)) { throw newError(S3ErrorTable.ACCESS_DENIED, destBucket + "/" + destkey, ex); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index 767c11506dc1..7012734b1611 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -92,6 +92,8 @@ public static Pair put( " considered as Unix Paths. 
Path has Violated FS Semantics " + "which caused put operation to fail."); throw os3Exception; + } else if ((((OMException) ex).getResult() == OMException.ResultCodes.TOKEN_EXPIRED)) { + throw S3ErrorTable.newError(S3ErrorTable.EXPIRED_TOKEN, keyPath); } else if ((((OMException) ex).getResult() == OMException.ResultCodes.PERMISSION_DENIED)) { throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath); @@ -230,6 +232,8 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID); + } else if (ex.getResult() == OMException.ResultCodes.TOKEN_EXPIRED) { + throw S3ErrorTable.newError(S3ErrorTable.EXPIRED_TOKEN, ozoneBucket.getName() + "/" + key); } else if (ex.getResult() == OMException.ResultCodes.PERMISSION_DENIED) { throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, ozoneBucket.getName() + "/" + key); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java index f93f4a7a4d7a..009e22c42cbf 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3Exception.java @@ -17,10 +17,12 @@ package org.apache.hadoop.ozone.s3.exception; +import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.dataformat.xml.XmlMapper; import com.fasterxml.jackson.module.jaxb.JaxbAnnotationModule; +import com.google.common.base.Strings; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; @@ -59,6 +61,14 @@ public class OS3Exception extends RuntimeException { 
@XmlElement(name = "RequestId") private String requestId; + @JsonInclude(JsonInclude.Include.NON_EMPTY) + @XmlElement(name = "HostId") + private String hostId; + + @JsonInclude(JsonInclude.Include.NON_EMPTY) + @XmlElement(name = "Token-0") + private String token0; + @XmlTransient private int httpCode; @@ -125,6 +135,22 @@ public void setResource(String resource) { this.resource = resource; } + public String getHostId() { + return hostId; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public String getToken0() { + return token0; + } + + public void setToken0(String token0) { + this.token0 = token0; + } + public int getHttpCode() { return httpCode; } @@ -146,16 +172,20 @@ public String toXml() { //When we get exception log it, and return exception as xml from actual // exception data. So, falling back to construct from exception. - String formatString = "" + - "" + - "%s" + - "%s" + - "%s" + - "%s" + - ""; - return String.format(formatString, this.getCode(), - this.getErrorMessage(), this.getResource(), - this.getRequestId()); + final StringBuilder builder = new StringBuilder("") + .append("") + .append("").append(this.getCode()).append("") + .append("").append(this.getErrorMessage()).append("") + .append("").append(this.getResource()).append("") + .append("").append(this.getRequestId()).append(""); + if (!Strings.isNullOrEmpty(this.getHostId())) { + builder.append("").append(this.getHostId()).append(""); + } + if (!Strings.isNullOrEmpty(this.getToken0())) { + builder.append("").append(this.getToken0()).append(""); + } + builder.append(""); + return builder.toString(); } /** Create a copy with specific message. 
*/ diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java index 5f110144c118..b576cb69b4c9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/OS3ExceptionMapper.java @@ -18,10 +18,12 @@ package org.apache.hadoop.ozone.s3.exception; import javax.inject.Inject; +import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import javax.ws.rs.ext.ExceptionMapper; import javax.ws.rs.ext.Provider; import org.apache.hadoop.ozone.s3.RequestIdentifier; +import org.apache.hadoop.ozone.s3.signature.SignatureInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -32,19 +34,33 @@ @Provider public class OS3ExceptionMapper implements ExceptionMapper { + private static final String EXPIRED_TOKEN = "ExpiredToken"; + private static final Logger LOG = LoggerFactory.getLogger(OS3ExceptionMapper.class); @Inject private RequestIdentifier requestIdentifier; + @Inject + private SignatureInfo signatureInfo; + @Override public Response toResponse(OS3Exception exception) { if (LOG.isDebugEnabled()) { LOG.debug("Returning exception. 
ex: {}", exception.toString()); } exception.setRequestId(requestIdentifier.getRequestId()); + exception.setHostId(requestIdentifier.getAmzId()); + if (EXPIRED_TOKEN.equals(exception.getCode()) && signatureInfo != null) { + final String sessionToken = signatureInfo.getSessionToken(); + if (sessionToken != null && !sessionToken.isEmpty()) { + exception.setToken0(sessionToken); + } + } return Response.status(exception.getHttpCode()) - .entity(exception.toXml()).build(); + .entity(exception.toXml()) + .type(MediaType.APPLICATION_XML) + .build(); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 301f5940af67..c7baaa95080b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -114,6 +114,9 @@ public final class S3ErrorTable { "AccessDenied", "User doesn't have the right to access this " + "resource.", HTTP_FORBIDDEN); + public static final OS3Exception EXPIRED_TOKEN = new OS3Exception( + "ExpiredToken", "The provided token has expired.", HTTP_FORBIDDEN); + public static final OS3Exception PRECOND_FAILED = new OS3Exception( "PreconditionFailed", "At least one of the pre-conditions you " + "specified did not hold", HTTP_PRECON_FAILED); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java index 92c2f102c905..343c341ee4f5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/AWSSignatureProcessor.java @@ -48,6 +48,7 @@ import org.apache.hadoop.ozone.audit.AuditMessage; 
import org.apache.hadoop.ozone.s3.HeaderPreprocessor; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.OSTSException; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.signature.SignatureInfo.Version; import org.apache.hadoop.ozone.s3.util.AuditUtils; @@ -209,7 +210,8 @@ private byte[] readAllBytes(InputStream in) throws OS3Exception, IOException { int n; while ((n = in.read(chunk)) != -1) { if (totalRead + n > OZONE_S3G_STS_PAYLOAD_HASH_MAX_VALUE) { - throw PAYLOAD_TOO_LARGE; + throw new OSTSException( + PAYLOAD_TOO_LARGE.getCode(), PAYLOAD_TOO_LARGE.getErrorMessage(), PAYLOAD_TOO_LARGE.getHttpCode()); } buffer.write(chunk, 0, n); totalRead += n; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEnabledEndpointRequestFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEnabledEndpointRequestFilter.java index 50157ea75b0f..08a93aeab3bc 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEnabledEndpointRequestFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEnabledEndpointRequestFilter.java @@ -31,7 +31,7 @@ /** * Filter that disables all endpoints annotated with {@link S3STSEnabled}. * Condition is based on the value of the configuration key - * ozone.s3g.s3sts.http.enabled. + * ozone.s3g.sts.http.enabled. 
*/ @S3STSEnabled @Provider diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEndpoint.java index e0be5c5183d8..091f8851fa3f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3sts/S3STSEndpoint.java @@ -90,6 +90,17 @@ public class S3STSEndpoint extends S3STSEndpointBase { private static final String UNSUPPORTED_OPERATION = "UnsupportedOperation"; private static final String MALFORMED_POLICY_DOCUMENT = "MalformedPolicyDocument"; + // JAXBContext is relatively expensive to create and is threadsafe, so cache and reuse + private static final JAXBContext JAXB_CONTEXT; + + static { + try { + JAXB_CONTEXT = JAXBContext.newInstance(S3AssumeRoleResponseXml.class); + } catch (JAXBException e) { + throw new RuntimeException("Failed to initialize JAXBContext: " + e, e); + } + } + @Inject private RequestIdentifier requestIdentifier; @@ -330,8 +341,7 @@ private String generateAssumeRoleResponse(String assumedRoleUserArn, AssumeRoleR meta.setRequestId(requestId); response.setResponseMetadata(meta); - final JAXBContext jaxbContext = JAXBContext.newInstance(S3AssumeRoleResponseXml.class); - final Marshaller marshaller = jaxbContext.createMarshaller(); + final Marshaller marshaller = JAXB_CONTEXT.createMarshaller(); marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); final StringWriter stringWriter = new StringWriter(); marshaller.marshal(response, stringWriter); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 304349f43717..5159f6214128 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.protocol.ListStatusLightOptions; import org.apache.hadoop.ozone.om.helpers.AssumeRoleResponseInfo; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; @@ -575,9 +576,7 @@ public List listStatus(String volumeName, String bucketName, } @Override - public List listStatusLight(String volumeName, - String bucketName, String keyName, boolean recursive, String startKey, - long numEntries, boolean allowPartialPrefixes) throws IOException { + public List listStatusLight(ListStatusLightOptions options) throws IOException { return null; } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java index 2b29bb9bcfb0..c5042ab5b248 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java @@ -24,6 +24,9 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.nio.charset.StandardCharsets; import java.util.Locale; @@ -31,6 +34,7 @@ import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.OzoneVolume; import 
org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.Test; @@ -126,4 +130,56 @@ public void init() { } assertFalse(endpointBase.isAccessDenied(new OMException(ResultCodes.BUCKET_NOT_FOUND))); } + @Test + public void testExpiredTokenResultCode() { + final EndpointBase endpointBase = new EndpointBase() { + @Override + public void init() { } + }; + + assertTrue(endpointBase.isExpiredToken(new OMException(ResultCodes.TOKEN_EXPIRED))); + assertFalse(endpointBase.isExpiredToken(new OMException(ResultCodes.INVALID_TOKEN))); + } + + @Test + public void testListS3BucketsHandlesRuntimeExceptionWrappingOMException() throws Exception { + final EndpointBase endpointBase = new EndpointBase() { + @Override + public void init() { } + + @Override + protected OzoneVolume getVolume() { + final OzoneVolume volume = mock(OzoneVolume.class); + when(volume.listBuckets(anyString())).thenThrow( + new RuntimeException(new OMException("Permission Denied", ResultCodes.PERMISSION_DENIED))); + return volume; + } + }; + + final OS3Exception e = assertThrows( + OS3Exception.class, () -> endpointBase.listS3Buckets( + "prefix", volume -> { }), "listS3Buckets should fail."); + + // Ensure we get the correct code + assertEquals("AccessDenied", e.getCode()); + } + + @Test + public void testListS3BucketsHandlesRuntimeExceptionWrappingOMExceptionVolumeNotFound() throws Exception { + final EndpointBase endpointBase = new EndpointBase() { + @Override + public void init() { } + + @Override + protected OzoneVolume getVolume() { + final OzoneVolume volume = mock(OzoneVolume.class); + when(volume.listBuckets(anyString())).thenThrow( + new RuntimeException(new OMException("Volume Not Found", ResultCodes.VOLUME_NOT_FOUND))); + return volume; + } + }; + + // Ensure we get an empty iterator + assertFalse(endpointBase.listS3Buckets("prefix", volume -> { }).hasNext()); + } } diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exceptions.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exceptions.java index a4ae1fce25b1..9c1eeb41aa38 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exceptions.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exceptions.java @@ -47,4 +47,27 @@ public void testOS3Exceptions() { ex.getRequestId()); assertEquals(expected, val); } + + @Test + public void testOS3ExceptionWithToken0() { + OS3Exception ex = new OS3Exception("ExpiredToken", "The provided token has expired.", 403); + ex = S3ErrorTable.newError(ex, "resource"); + ex.setRequestId(OzoneUtils.getRequestID()); + ex.setHostId(OzoneUtils.getRequestID()); + ex.setToken0("token-value"); + + final String val = ex.toXml(); + final String formatString = "%n" + + "%n" + + " %s%n" + + " %s%n" + + " %s%n" + + " %s%n" + + " %s%n" + + " %s%n" + + "%n"; + final String expected = String.format(formatString, ex.getCode(), ex.getErrorMessage(), ex.getResource(), + ex.getRequestId(), ex.getHostId(), ex.getToken0()); + assertEquals(expected, val); + } }