2025-12-01
This commit is contained in:
@@ -0,0 +1,215 @@
|
||||
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
|
||||
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
from logging import NullHandler
|
||||
|
||||
__version__ = '1.39.17'


# Configure default logger to do nothing; applications opt in to botocore
# logging by attaching their own handlers to the 'botocore' logger.
log = logging.getLogger('botocore')
log.addHandler(NullHandler())

# Callbacks invoked for every new Session; managed via
# register_initializer()/unregister_initializer() defined below.
_INITIALIZERS = []

# Regexes used by xform_name() to insert a separator at CamelCase word
# boundaries: a capitalized word following any character...
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
# ...and an uppercase letter following a lowercase letter or digit.
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{2,}s$')
|
||||
# Prepopulate the cache with special cases that don't match
# our regular transformation.
# Keys are (OperationName, separator); values are the desired transformed
# name, used by xform_name() before attempting the regex-based transform.
_xform_cache = {
    ('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
    ('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
    ('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
    ('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
    ('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
    ('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
    ('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
    ('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
    ('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
    ('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
    ('ExecutePartiQLStatement', '_'): 'execute_partiql_statement',
    ('ExecutePartiQLStatement', '-'): 'execute-partiql-statement',
    ('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction',
    ('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction',
    ('ExecutePartiQLBatch', '_'): 'execute_partiql_batch',
    ('ExecutePartiQLBatch', '-'): 'execute-partiql-batch',
    (
        'AssociateWhatsAppBusinessAccount',
        '_',
    ): 'associate_whatsapp_business_account',
    (
        'AssociateWhatsAppBusinessAccount',
        '-',
    ): 'associate-whatsapp-business-account',
    ('CreateWhatsAppMessageTemplate', '_'): 'create_whatsapp_message_template',
    ('CreateWhatsAppMessageTemplate', '-'): 'create-whatsapp-message-template',
    (
        'CreateWhatsAppMessageTemplateFromLibrary',
        '_',
    ): 'create_whatsapp_message_template_from_library',
    (
        'CreateWhatsAppMessageTemplateFromLibrary',
        '-',
    ): 'create-whatsapp-message-template-from-library',
    (
        'CreateWhatsAppMessageTemplateMedia',
        '_',
    ): 'create_whatsapp_message_template_media',
    (
        'CreateWhatsAppMessageTemplateMedia',
        '-',
    ): 'create-whatsapp-message-template-media',
    ('DeleteWhatsAppMessageMedia', '_'): 'delete_whatsapp_message_media',
    ('DeleteWhatsAppMessageMedia', '-'): 'delete-whatsapp-message-media',
    ('DeleteWhatsAppMessageTemplate', '_'): 'delete_whatsapp_message_template',
    ('DeleteWhatsAppMessageTemplate', '-'): 'delete-whatsapp-message-template',
    (
        'DisassociateWhatsAppBusinessAccount',
        '_',
    ): 'disassociate_whatsapp_business_account',
    (
        'DisassociateWhatsAppBusinessAccount',
        '-',
    ): 'disassociate-whatsapp-business-account',
    (
        'GetLinkedWhatsAppBusinessAccount',
        '_',
    ): 'get_linked_whatsapp_business_account',
    (
        'GetLinkedWhatsAppBusinessAccount',
        '-',
    ): 'get-linked-whatsapp-business-account',
    (
        'GetLinkedWhatsAppBusinessAccountPhoneNumber',
        '_',
    ): 'get_linked_whatsapp_business_account_phone_number',
    (
        'GetLinkedWhatsAppBusinessAccountPhoneNumber',
        '-',
    ): 'get-linked-whatsapp-business-account-phone-number',
    ('GetWhatsAppMessageMedia', '_'): 'get_whatsapp_message_media',
    ('GetWhatsAppMessageMedia', '-'): 'get-whatsapp-message-media',
    ('GetWhatsAppMessageTemplate', '_'): 'get_whatsapp_message_template',
    ('GetWhatsAppMessageTemplate', '-'): 'get-whatsapp-message-template',
    (
        'ListLinkedWhatsAppBusinessAccounts',
        '_',
    ): 'list_linked_whatsapp_business_accounts',
    (
        'ListLinkedWhatsAppBusinessAccounts',
        '-',
    ): 'list-linked-whatsapp-business-accounts',
    ('ListWhatsAppMessageTemplates', '_'): 'list_whatsapp_message_templates',
    ('ListWhatsAppMessageTemplates', '-'): 'list-whatsapp-message-templates',
    ('ListWhatsAppTemplateLibrary', '_'): 'list_whatsapp_template_library',
    ('ListWhatsAppTemplateLibrary', '-'): 'list-whatsapp-template-library',
    ('PostWhatsAppMessageMedia', '_'): 'post_whatsapp_message_media',
    ('PostWhatsAppMessageMedia', '-'): 'post-whatsapp-message-media',
    (
        'PutWhatsAppBusinessAccountEventDestinations',
        '_',
    ): 'put_whatsapp_business_account_event_destinations',
    (
        'PutWhatsAppBusinessAccountEventDestinations',
        '-',
    ): 'put-whatsapp-business-account-event-destinations',
    ('SendWhatsAppMessage', '_'): 'send_whatsapp_message',
    ('SendWhatsAppMessage', '-'): 'send-whatsapp-message',
    ('UpdateWhatsAppMessageTemplate', '_'): 'update_whatsapp_message_template',
    ('UpdateWhatsAppMessageTemplate', '-'): 'update-whatsapp-message-template',
}
# Scalar (non-structural) types as named in service model shapes.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')

# Absolute path of the installed botocore package directory.
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
# Used to specify anonymous (unsigned) request signature
|
||||
class UNSIGNED:
    """Sentinel used to request an anonymous (unsigned) signature.

    Both copy hooks return ``self`` so that ``is UNSIGNED`` identity
    checks keep working even after a Config object is (deep)copied.
    """

    def __copy__(self):
        return self

    def __deepcopy__(self, memodict):
        return self


# Rebind the name to the one-and-only instance; the class itself is
# no longer reachable, which makes the sentinel effectively unique.
UNSIGNED = UNSIGNED()
|
||||
|
||||
|
||||
def xform_name(name, sep='_', _xform_cache=_xform_cache):
    """Convert camel case to a "pythonic" name.

    If the name contains the ``sep`` character, then it is
    returned unchanged.

    :param name: The CamelCase name to transform.
    :param sep: Separator inserted at word boundaries (default ``'_'``).
    :param _xform_cache: Memoization dict keyed by ``(name, sep)``;
        prepopulated at module level with special cases.
    :return: The transformed, lowercased name.
    """
    if sep in name:
        # If the sep is in the name, assume that it's already
        # transformed and return the string unchanged.
        return name
    key = (name, sep)
    if key not in _xform_cache:
        # Run the special-case regex once and reuse the match object
        # (the previous version searched twice: once to test, once
        # to fetch the match).
        is_special = _special_case_transform.search(name)
        if is_special is not None:
            matched = is_special.group()
            # Replace something like ARNs, ACLs with _arns, _acls.
            name = f"{name[: -len(matched)]}{sep}{matched.lower()}"
        s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
        transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
        _xform_cache[key] = transformed
    return _xform_cache[key]
|
||||
|
||||
|
||||
def register_initializer(callback):
    """Register an initializer function for session creation.

    Every callable registered here is invoked whenever a new
    `botocore.session.Session` is instantiated.

    :type callback: callable
    :param callback: A callable accepting a single
        `botocore.session.Session` argument.

    """
    _INITIALIZERS.append(callback)
|
||||
|
||||
|
||||
def unregister_initializer(callback):
    """Remove a previously registered initializer function.

    :type callback: callable
    :param callback: A callable previously passed to
        `botocore.register_initializer`.

    :raises ValueError: If the callback is not currently registered
        (propagated from ``list.remove``).

    """
    _INITIALIZERS.remove(callback)
|
||||
|
||||
|
||||
def invoke_initializers(session):
    """Run every registered initializer against a session.

    :type session: botocore.session.Session
    :param session: The session to initialize.

    """
    for callback in _INITIALIZERS:
        callback(session)
|
||||
@@ -0,0 +1,983 @@
|
||||
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
"""Internal module to help with normalizing botocore client args.
|
||||
|
||||
This module (and all function/classes within this module) should be
|
||||
considered internal, and *not* a public API.
|
||||
|
||||
"""
|
||||
|
||||
import copy
|
||||
import logging
|
||||
import socket
|
||||
|
||||
import botocore.exceptions
|
||||
import botocore.parsers
|
||||
import botocore.serialize
|
||||
from botocore.config import Config
|
||||
from botocore.endpoint import EndpointCreator
|
||||
from botocore.regions import EndpointResolverBuiltins as EPRBuiltins
|
||||
from botocore.regions import EndpointRulesetResolver
|
||||
from botocore.signers import RequestSigner
|
||||
from botocore.useragent import UserAgentString, register_feature_id
|
||||
from botocore.utils import (
|
||||
PRIORITY_ORDERED_SUPPORTED_PROTOCOLS, # noqa: F401
|
||||
ensure_boolean,
|
||||
is_s3_accelerate_url,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Accepted values for the s3 us_east_1_regional_endpoint and
# sts_regional_endpoints settings (validated below).
VALID_REGIONAL_ENDPOINTS_CONFIG = [
    'legacy',
    'regional',
]
# Regions that historically resolved to the global sts.amazonaws.com
# endpoint under 'legacy' sts_regional_endpoints mode.
LEGACY_GLOBAL_STS_REGIONS = [
    'ap-northeast-1',
    'ap-south-1',
    'ap-southeast-1',
    'ap-southeast-2',
    'aws-global',
    'ca-central-1',
    'eu-central-1',
    'eu-north-1',
    'eu-west-1',
    'eu-west-2',
    'eu-west-3',
    'sa-east-1',
    'us-east-1',
    'us-east-2',
    'us-west-1',
    'us-west-2',
]
# Maximum allowed length of the ``user_agent_appid`` config field. Longer
# values result in a warning-level log message.
USERAGENT_APPID_MAXLEN = 50

# Accepted values for the request_checksum_calculation setting.
VALID_REQUEST_CHECKSUM_CALCULATION_CONFIG = (
    "when_supported",
    "when_required",
)
# Accepted values for the response_checksum_validation setting.
VALID_RESPONSE_CHECKSUM_VALIDATION_CONFIG = (
    "when_supported",
    "when_required",
)


# Accepted values for the account_id_endpoint_mode setting.
VALID_ACCOUNT_ID_ENDPOINT_MODE_CONFIG = (
    'preferred',
    'disabled',
    'required',
)
|
||||
|
||||
|
||||
class ClientArgsCreator:
|
||||
def __init__(
|
||||
self,
|
||||
event_emitter,
|
||||
user_agent,
|
||||
response_parser_factory,
|
||||
loader,
|
||||
exceptions_factory,
|
||||
config_store,
|
||||
user_agent_creator=None,
|
||||
):
|
||||
self._event_emitter = event_emitter
|
||||
self._response_parser_factory = response_parser_factory
|
||||
self._loader = loader
|
||||
self._exceptions_factory = exceptions_factory
|
||||
self._config_store = config_store
|
||||
if user_agent_creator is None:
|
||||
self._session_ua_creator = UserAgentString.from_environment()
|
||||
else:
|
||||
self._session_ua_creator = user_agent_creator
|
||||
|
||||
def get_client_args(
    self,
    service_model,
    region_name,
    is_secure,
    endpoint_url,
    verify,
    credentials,
    scoped_config,
    client_config,
    endpoint_bridge,
    auth_token=None,
    endpoints_ruleset_data=None,
    partition_data=None,
):
    """Build the full kwargs dict used to construct a BaseClient.

    Resolves configuration via ``compute_client_args`` and then wires
    up the concrete collaborators: request signer, Config, endpoint,
    serializer, response parser, endpoint ruleset resolver, and the
    client-scoped user-agent creator.
    """
    final_args = self.compute_client_args(
        service_model,
        client_config,
        endpoint_bridge,
        region_name,
        endpoint_url,
        is_secure,
        scoped_config,
    )

    # Unpack the resolved values computed above.
    service_name = final_args['service_name']  # noqa
    parameter_validation = final_args['parameter_validation']
    endpoint_config = final_args['endpoint_config']
    protocol = final_args['protocol']
    config_kwargs = final_args['config_kwargs']
    s3_config = final_args['s3_config']
    partition = endpoint_config['metadata'].get('partition', None)
    socket_options = final_args['socket_options']
    configured_endpoint_url = final_args['configured_endpoint_url']
    signing_region = endpoint_config['signing_region']
    endpoint_region_name = endpoint_config['region_name']
    account_id_endpoint_mode = config_kwargs['account_id_endpoint_mode']

    # Each client gets its own copy of the emitter so per-client
    # handler registrations don't leak into the session's emitter.
    event_emitter = copy.copy(self._event_emitter)
    signer = RequestSigner(
        service_model.service_id,
        signing_region,
        endpoint_config['signing_name'],
        endpoint_config['signature_version'],
        credentials,
        event_emitter,
        auth_token,
    )

    config_kwargs['s3'] = s3_config
    new_config = Config(**config_kwargs)
    endpoint_creator = EndpointCreator(event_emitter)

    endpoint = endpoint_creator.create_endpoint(
        service_model,
        region_name=endpoint_region_name,
        endpoint_url=endpoint_config['endpoint_url'],
        verify=verify,
        response_parser_factory=self._response_parser_factory,
        max_pool_connections=new_config.max_pool_connections,
        proxies=new_config.proxies,
        timeout=(new_config.connect_timeout, new_config.read_timeout),
        socket_options=socket_options,
        client_cert=new_config.client_cert,
        proxies_config=new_config.proxies_config,
    )

    serializer = botocore.serialize.create_serializer(
        protocol, parameter_validation
    )
    response_parser = botocore.parsers.create_parser(protocol)

    ruleset_resolver = self._build_endpoint_resolver(
        endpoints_ruleset_data,
        partition_data,
        client_config,
        service_model,
        endpoint_region_name,
        region_name,
        configured_endpoint_url,
        endpoint,
        is_secure,
        endpoint_bridge,
        event_emitter,
        credentials,
        account_id_endpoint_mode,
    )

    # Copy the session's user-agent factory and add client configuration.
    client_ua_creator = self._session_ua_creator.with_client_config(
        new_config
    )
    supplied_ua = client_config.user_agent if client_config else None
    new_config._supplied_user_agent = supplied_ua

    return {
        'serializer': serializer,
        'endpoint': endpoint,
        'response_parser': response_parser,
        'event_emitter': event_emitter,
        'request_signer': signer,
        'service_model': service_model,
        'loader': self._loader,
        'client_config': new_config,
        'partition': partition,
        'exceptions_factory': self._exceptions_factory,
        'endpoint_ruleset_resolver': ruleset_resolver,
        'user_agent_creator': client_ua_creator,
    }
|
||||
|
||||
def compute_client_args(
    self,
    service_model,
    client_config,
    endpoint_bridge,
    region_name,
    endpoint_url,
    is_secure,
    scoped_config,
):
    """Resolve all configurable inputs into a flat dict of client args.

    Precedence for each setting, as implemented below: explicit client
    Config values win over scoped/shared config values, which win over
    config-store defaults. Returns a dict consumed by
    ``get_client_args``.
    """
    service_name = service_model.endpoint_prefix
    protocol = service_model.resolved_protocol
    # Parameter validation defaults on; an explicit Config value wins,
    # otherwise the scoped config is consulted.
    parameter_validation = True
    if client_config and not client_config.parameter_validation:
        parameter_validation = False
    elif scoped_config:
        raw_value = scoped_config.get('parameter_validation')
        if raw_value is not None:
            parameter_validation = ensure_boolean(raw_value)

    s3_config = self.compute_s3_config(client_config)

    configured_endpoint_url = self._compute_configured_endpoint_url(
        client_config=client_config,
        endpoint_url=endpoint_url,
    )
    if configured_endpoint_url is not None:
        # Record the endpoint override in the user-agent feature ids.
        register_feature_id('ENDPOINT_OVERRIDE')

    endpoint_config = self._compute_endpoint_config(
        service_name=service_name,
        region_name=region_name,
        endpoint_url=configured_endpoint_url,
        is_secure=is_secure,
        endpoint_bridge=endpoint_bridge,
        s3_config=s3_config,
    )
    endpoint_variant_tags = endpoint_config['metadata'].get('tags', [])

    # Some third-party libraries expect the final user-agent string in
    # ``client.meta.config.user_agent``. To maintain backwards
    # compatibility, the preliminary user-agent string (before any Config
    # object modifications and without request-specific user-agent
    # components) is stored in the new Config object's ``user_agent``
    # property but not used by Botocore itself.
    preliminary_ua_string = self._session_ua_creator.with_client_config(
        client_config
    ).to_string()
    # Create a new client config to be passed to the client based
    # on the final values. We do not want the user to be able
    # to try to modify an existing client with a client config.
    config_kwargs = dict(
        region_name=endpoint_config['region_name'],
        signature_version=endpoint_config['signature_version'],
        user_agent=preliminary_ua_string,
    )
    if 'dualstack' in endpoint_variant_tags:
        config_kwargs.update(use_dualstack_endpoint=True)
    if 'fips' in endpoint_variant_tags:
        config_kwargs.update(use_fips_endpoint=True)
    if client_config is not None:
        # Carry forward every user-supplied Config value.
        config_kwargs.update(
            connect_timeout=client_config.connect_timeout,
            read_timeout=client_config.read_timeout,
            max_pool_connections=client_config.max_pool_connections,
            proxies=client_config.proxies,
            proxies_config=client_config.proxies_config,
            retries=client_config.retries,
            client_cert=client_config.client_cert,
            inject_host_prefix=client_config.inject_host_prefix,
            tcp_keepalive=client_config.tcp_keepalive,
            user_agent_extra=client_config.user_agent_extra,
            user_agent_appid=client_config.user_agent_appid,
            request_min_compression_size_bytes=(
                client_config.request_min_compression_size_bytes
            ),
            disable_request_compression=(
                client_config.disable_request_compression
            ),
            client_context_params=client_config.client_context_params,
            sigv4a_signing_region_set=(
                client_config.sigv4a_signing_region_set
            ),
            request_checksum_calculation=(
                client_config.request_checksum_calculation
            ),
            response_checksum_validation=(
                client_config.response_checksum_validation
            ),
            account_id_endpoint_mode=client_config.account_id_endpoint_mode,
            auth_scheme_preference=client_config.auth_scheme_preference,
        )
    # Each helper backfills/normalizes one family of settings in place.
    self._compute_retry_config(config_kwargs)
    self._compute_connect_timeout(config_kwargs)
    self._compute_user_agent_appid_config(config_kwargs)
    self._compute_request_compression_config(config_kwargs)
    self._compute_sigv4a_signing_region_set_config(config_kwargs)
    self._compute_checksum_config(config_kwargs)
    self._compute_account_id_endpoint_mode_config(config_kwargs)
    self._compute_inject_host_prefix(client_config, config_kwargs)
    self._compute_auth_scheme_preference_config(
        client_config, config_kwargs
    )
    self._compute_signature_version_config(client_config, config_kwargs)
    # NOTE(review): s3_config was already computed above and nothing in
    # between mutates client_config — this second call looks redundant;
    # confirm before removing.
    s3_config = self.compute_s3_config(client_config)

    is_s3_service = self._is_s3_service(service_name)

    if is_s3_service and 'dualstack' in endpoint_variant_tags:
        if s3_config is None:
            s3_config = {}
        s3_config['use_dualstack_endpoint'] = True

    return {
        'service_name': service_name,
        'parameter_validation': parameter_validation,
        'configured_endpoint_url': configured_endpoint_url,
        'endpoint_config': endpoint_config,
        'protocol': protocol,
        'config_kwargs': config_kwargs,
        's3_config': s3_config,
        'socket_options': self._compute_socket_options(
            scoped_config, client_config
        ),
    }
|
||||
|
||||
def _compute_inject_host_prefix(self, client_config, config_kwargs):
|
||||
# In the cases that a Config object was not provided, or the private value
|
||||
# remained UNSET, we should resolve the value from the config store.
|
||||
if (
|
||||
client_config is None
|
||||
or client_config._inject_host_prefix == 'UNSET'
|
||||
):
|
||||
configured_disable_host_prefix_injection = (
|
||||
self._config_store.get_config_variable(
|
||||
'disable_host_prefix_injection'
|
||||
)
|
||||
)
|
||||
if configured_disable_host_prefix_injection is not None:
|
||||
config_kwargs[
|
||||
'inject_host_prefix'
|
||||
] = not configured_disable_host_prefix_injection
|
||||
else:
|
||||
config_kwargs['inject_host_prefix'] = True
|
||||
|
||||
def _compute_configured_endpoint_url(self, client_config, endpoint_url):
|
||||
if endpoint_url is not None:
|
||||
return endpoint_url
|
||||
|
||||
if self._ignore_configured_endpoint_urls(client_config):
|
||||
logger.debug("Ignoring configured endpoint URLs.")
|
||||
return endpoint_url
|
||||
|
||||
return self._config_store.get_config_variable('endpoint_url')
|
||||
|
||||
def _ignore_configured_endpoint_urls(self, client_config):
|
||||
if (
|
||||
client_config
|
||||
and client_config.ignore_configured_endpoint_urls is not None
|
||||
):
|
||||
return client_config.ignore_configured_endpoint_urls
|
||||
|
||||
return self._config_store.get_config_variable(
|
||||
'ignore_configured_endpoint_urls'
|
||||
)
|
||||
|
||||
def compute_s3_config(self, client_config):
|
||||
s3_configuration = self._config_store.get_config_variable('s3')
|
||||
|
||||
# Next specific client config values takes precedence over
|
||||
# specific values in the scoped config.
|
||||
if client_config is not None:
|
||||
if client_config.s3 is not None:
|
||||
if s3_configuration is None:
|
||||
s3_configuration = client_config.s3
|
||||
else:
|
||||
# The current s3_configuration dictionary may be
|
||||
# from a source that only should be read from so
|
||||
# we want to be safe and just make a copy of it to modify
|
||||
# before it actually gets updated.
|
||||
s3_configuration = s3_configuration.copy()
|
||||
s3_configuration.update(client_config.s3)
|
||||
|
||||
return s3_configuration
|
||||
|
||||
def _is_s3_service(self, service_name):
|
||||
"""Whether the service is S3 or S3 Control.
|
||||
|
||||
Note that throughout this class, service_name refers to the endpoint
|
||||
prefix, not the folder name of the service in botocore/data. For
|
||||
S3 Control, the folder name is 's3control' but the endpoint prefix is
|
||||
's3-control'.
|
||||
"""
|
||||
return service_name in ['s3', 's3-control']
|
||||
|
||||
def _compute_endpoint_config(
|
||||
self,
|
||||
service_name,
|
||||
region_name,
|
||||
endpoint_url,
|
||||
is_secure,
|
||||
endpoint_bridge,
|
||||
s3_config,
|
||||
):
|
||||
resolve_endpoint_kwargs = {
|
||||
'service_name': service_name,
|
||||
'region_name': region_name,
|
||||
'endpoint_url': endpoint_url,
|
||||
'is_secure': is_secure,
|
||||
'endpoint_bridge': endpoint_bridge,
|
||||
}
|
||||
if service_name == 's3':
|
||||
return self._compute_s3_endpoint_config(
|
||||
s3_config=s3_config, **resolve_endpoint_kwargs
|
||||
)
|
||||
if service_name == 'sts':
|
||||
return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs)
|
||||
return self._resolve_endpoint(**resolve_endpoint_kwargs)
|
||||
|
||||
def _compute_s3_endpoint_config(
|
||||
self, s3_config, **resolve_endpoint_kwargs
|
||||
):
|
||||
force_s3_global = self._should_force_s3_global(
|
||||
resolve_endpoint_kwargs['region_name'], s3_config
|
||||
)
|
||||
if force_s3_global:
|
||||
resolve_endpoint_kwargs['region_name'] = None
|
||||
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
|
||||
self._set_region_if_custom_s3_endpoint(
|
||||
endpoint_config, resolve_endpoint_kwargs['endpoint_bridge']
|
||||
)
|
||||
# For backwards compatibility reasons, we want to make sure the
|
||||
# client.meta.region_name will remain us-east-1 if we forced the
|
||||
# endpoint to be the global region. Specifically, if this value
|
||||
# changes to aws-global, it breaks logic where a user is checking
|
||||
# for us-east-1 as the global endpoint such as in creating buckets.
|
||||
if force_s3_global and endpoint_config['region_name'] == 'aws-global':
|
||||
endpoint_config['region_name'] = 'us-east-1'
|
||||
return endpoint_config
|
||||
|
||||
def _should_force_s3_global(self, region_name, s3_config):
|
||||
s3_regional_config = 'legacy'
|
||||
if s3_config and 'us_east_1_regional_endpoint' in s3_config:
|
||||
s3_regional_config = s3_config['us_east_1_regional_endpoint']
|
||||
self._validate_s3_regional_config(s3_regional_config)
|
||||
|
||||
is_global_region = region_name in ('us-east-1', None)
|
||||
return s3_regional_config == 'legacy' and is_global_region
|
||||
|
||||
def _validate_s3_regional_config(self, config_val):
    """Raise unless *config_val* is a recognized regional-endpoint mode."""
    if config_val in VALID_REGIONAL_ENDPOINTS_CONFIG:
        return
    raise botocore.exceptions.InvalidS3UsEast1RegionalEndpointConfigError(
        s3_us_east_1_regional_endpoint_config=config_val
    )
|
||||
|
||||
def _set_region_if_custom_s3_endpoint(
|
||||
self, endpoint_config, endpoint_bridge
|
||||
):
|
||||
# If a user is providing a custom URL, the endpoint resolver will
|
||||
# refuse to infer a signing region. If we want to default to s3v4,
|
||||
# we have to account for this.
|
||||
if (
|
||||
endpoint_config['signing_region'] is None
|
||||
and endpoint_config['region_name'] is None
|
||||
):
|
||||
endpoint = endpoint_bridge.resolve('s3')
|
||||
endpoint_config['signing_region'] = endpoint['signing_region']
|
||||
endpoint_config['region_name'] = endpoint['region_name']
|
||||
|
||||
def _compute_sts_endpoint_config(self, **resolve_endpoint_kwargs):
|
||||
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
|
||||
if self._should_set_global_sts_endpoint(
|
||||
resolve_endpoint_kwargs['region_name'],
|
||||
resolve_endpoint_kwargs['endpoint_url'],
|
||||
endpoint_config,
|
||||
):
|
||||
self._set_global_sts_endpoint(
|
||||
endpoint_config, resolve_endpoint_kwargs['is_secure']
|
||||
)
|
||||
return endpoint_config
|
||||
|
||||
def _should_set_global_sts_endpoint(
|
||||
self, region_name, endpoint_url, endpoint_config
|
||||
):
|
||||
has_variant_tags = endpoint_config and endpoint_config.get(
|
||||
'metadata', {}
|
||||
).get('tags')
|
||||
if endpoint_url or has_variant_tags:
|
||||
return False
|
||||
return (
|
||||
self._get_sts_regional_endpoints_config() == 'legacy'
|
||||
and region_name in LEGACY_GLOBAL_STS_REGIONS
|
||||
)
|
||||
|
||||
def _get_sts_regional_endpoints_config(self):
    """Return the validated sts_regional_endpoints mode.

    Falls back to 'legacy' when unset; raises for any value outside
    VALID_REGIONAL_ENDPOINTS_CONFIG.
    """
    mode = self._config_store.get_config_variable(
        'sts_regional_endpoints'
    )
    if not mode:
        mode = 'legacy'
    if mode not in VALID_REGIONAL_ENDPOINTS_CONFIG:
        raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError(
            sts_regional_endpoints_config=mode
        )
    return mode
|
||||
|
||||
def _set_global_sts_endpoint(self, endpoint_config, is_secure):
|
||||
scheme = 'https' if is_secure else 'http'
|
||||
endpoint_config['endpoint_url'] = f'{scheme}://sts.amazonaws.com'
|
||||
endpoint_config['signing_region'] = 'us-east-1'
|
||||
|
||||
def _resolve_endpoint(
|
||||
self,
|
||||
service_name,
|
||||
region_name,
|
||||
endpoint_url,
|
||||
is_secure,
|
||||
endpoint_bridge,
|
||||
):
|
||||
return endpoint_bridge.resolve(
|
||||
service_name, region_name, endpoint_url, is_secure
|
||||
)
|
||||
|
||||
def _compute_socket_options(self, scoped_config, client_config=None):
|
||||
# This disables Nagle's algorithm and is the default socket options
|
||||
# in urllib3.
|
||||
socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
|
||||
client_keepalive = client_config and client_config.tcp_keepalive
|
||||
scoped_keepalive = scoped_config and self._ensure_boolean(
|
||||
scoped_config.get("tcp_keepalive", False)
|
||||
)
|
||||
# Enables TCP Keepalive if specified in client config object or shared config file.
|
||||
if client_keepalive or scoped_keepalive:
|
||||
socket_options.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
|
||||
return socket_options
|
||||
|
||||
def _compute_retry_config(self, config_kwargs):
|
||||
self._compute_retry_max_attempts(config_kwargs)
|
||||
self._compute_retry_mode(config_kwargs)
|
||||
|
||||
def _compute_retry_max_attempts(self, config_kwargs):
|
||||
# There's a pre-existing max_attempts client config value that actually
|
||||
# means max *retry* attempts. There's also a `max_attempts` we pull
|
||||
# from the config store that means *total attempts*, which includes the
|
||||
# intitial request. We can't change what `max_attempts` means in
|
||||
# client config so we try to normalize everything to a new
|
||||
# "total_max_attempts" variable. We ensure that after this, the only
|
||||
# configuration for "max attempts" is the 'total_max_attempts' key.
|
||||
# An explicitly provided max_attempts in the client config
|
||||
# overrides everything.
|
||||
retries = config_kwargs.get('retries')
|
||||
if retries is not None:
|
||||
if 'total_max_attempts' in retries:
|
||||
retries.pop('max_attempts', None)
|
||||
return
|
||||
if 'max_attempts' in retries:
|
||||
value = retries.pop('max_attempts')
|
||||
# client config max_attempts means total retries so we
|
||||
# have to add one for 'total_max_attempts' to account
|
||||
# for the initial request.
|
||||
retries['total_max_attempts'] = value + 1
|
||||
return
|
||||
# Otherwise we'll check the config store which checks env vars,
|
||||
# config files, etc. There is no default value for max_attempts
|
||||
# so if this returns None and we don't set a default value here.
|
||||
max_attempts = self._config_store.get_config_variable('max_attempts')
|
||||
if max_attempts is not None:
|
||||
if retries is None:
|
||||
retries = {}
|
||||
config_kwargs['retries'] = retries
|
||||
retries['total_max_attempts'] = max_attempts
|
||||
|
||||
def _compute_retry_mode(self, config_kwargs):
|
||||
retries = config_kwargs.get('retries')
|
||||
if retries is None:
|
||||
retries = {}
|
||||
config_kwargs['retries'] = retries
|
||||
elif 'mode' in retries:
|
||||
# If there's a retry mode explicitly set in the client config
|
||||
# that overrides everything.
|
||||
return
|
||||
retry_mode = self._config_store.get_config_variable('retry_mode')
|
||||
if retry_mode is None:
|
||||
retry_mode = 'legacy'
|
||||
retries['mode'] = retry_mode
|
||||
|
||||
def _compute_connect_timeout(self, config_kwargs):
|
||||
# Checking if connect_timeout is set on the client config.
|
||||
# If it is not, we check the config_store in case a
|
||||
# non legacy default mode has been configured.
|
||||
connect_timeout = config_kwargs.get('connect_timeout')
|
||||
if connect_timeout is not None:
|
||||
return
|
||||
connect_timeout = self._config_store.get_config_variable(
|
||||
'connect_timeout'
|
||||
)
|
||||
if connect_timeout:
|
||||
config_kwargs['connect_timeout'] = connect_timeout
|
||||
|
||||
def _compute_request_compression_config(self, config_kwargs):
|
||||
min_size = config_kwargs.get('request_min_compression_size_bytes')
|
||||
disabled = config_kwargs.get('disable_request_compression')
|
||||
if min_size is None:
|
||||
min_size = self._config_store.get_config_variable(
|
||||
'request_min_compression_size_bytes'
|
||||
)
|
||||
# conversion func is skipped so input validation must be done here
|
||||
# regardless if the value is coming from the config store or the
|
||||
# config object
|
||||
min_size = self._validate_min_compression_size(min_size)
|
||||
config_kwargs['request_min_compression_size_bytes'] = min_size
|
||||
|
||||
if disabled is None:
|
||||
disabled = self._config_store.get_config_variable(
|
||||
'disable_request_compression'
|
||||
)
|
||||
else:
|
||||
# if the user provided a value we must check if it's a boolean
|
||||
disabled = ensure_boolean(disabled)
|
||||
config_kwargs['disable_request_compression'] = disabled
|
||||
|
||||
def _validate_min_compression_size(self, min_size):
|
||||
min_allowed_min_size = 1
|
||||
max_allowed_min_size = 1048576
|
||||
error_msg_base = (
|
||||
f'Invalid value "{min_size}" for '
|
||||
'request_min_compression_size_bytes.'
|
||||
)
|
||||
try:
|
||||
min_size = int(min_size)
|
||||
except (ValueError, TypeError):
|
||||
msg = (
|
||||
f'{error_msg_base} Value must be an integer. '
|
||||
f'Received {type(min_size)} instead.'
|
||||
)
|
||||
raise botocore.exceptions.InvalidConfigError(error_msg=msg)
|
||||
if not min_allowed_min_size <= min_size <= max_allowed_min_size:
|
||||
msg = (
|
||||
f'{error_msg_base} Value must be between '
|
||||
f'{min_allowed_min_size} and {max_allowed_min_size}.'
|
||||
)
|
||||
raise botocore.exceptions.InvalidConfigError(error_msg=msg)
|
||||
|
||||
return min_size
|
||||
|
||||
def _ensure_boolean(self, val):
|
||||
if isinstance(val, bool):
|
||||
return val
|
||||
else:
|
||||
return val.lower() == 'true'
|
||||
|
||||
def _build_endpoint_resolver(
    self,
    endpoints_ruleset_data,
    partition_data,
    client_config,
    service_model,
    endpoint_region_name,
    region_name,
    endpoint_url,
    endpoint,
    is_secure,
    endpoint_bridge,
    event_emitter,
    credentials,
    account_id_endpoint_mode,
):
    """Create the service-specific EndpointRulesetResolver.

    Returns ``None`` when no endpoint ruleset data is available for
    the service.
    """
    if endpoints_ruleset_data is None:
        return None

    # The legacy EndpointResolver is global to the session, but
    # EndpointRulesetResolver is service-specific.  Builtins for
    # EndpointRulesetResolver must therefore not be derived from the
    # legacy endpoint resolver's output (final_args, s3_config, etc.).
    s3_settings = self.compute_s3_config(client_config) or {}
    svc_name = service_model.endpoint_prefix

    # Maintain the historical, more complex region handling for s3 and
    # sts endpoints for backwards compatibility.
    if svc_name in ('s3', 'sts') or region_name is None:
        builtins_region = endpoint_region_name
    else:
        builtins_region = region_name

    builtins = self.compute_endpoint_resolver_builtin_defaults(
        region_name=builtins_region,
        service_name=svc_name,
        s3_config=s3_settings,
        endpoint_bridge=endpoint_bridge,
        client_endpoint_url=endpoint_url,
        legacy_endpoint_url=endpoint.host,
        credentials=credentials,
        account_id_endpoint_mode=account_id_endpoint_mode,
    )

    # Client context params for s3 conflict with the settings available
    # on the `s3` parameter of the Config object.  When the same
    # parameter appears in both, the `s3` parameter value wins.
    if client_config is None:
        context_params = {}
    else:
        context_params = client_config.client_context_params or {}
    if self._is_s3_service(svc_name):
        context_params.update(s3_settings)

    requested_auth = None
    if client_config is not None:
        requested_auth = client_config.signature_version

    return EndpointRulesetResolver(
        endpoint_ruleset_data=endpoints_ruleset_data,
        partition_data=partition_data,
        service_model=service_model,
        builtins=builtins,
        client_context=context_params,
        event_emitter=event_emitter,
        use_ssl=is_secure,
        requested_auth_scheme=requested_auth,
    )
|
||||
|
||||
def compute_endpoint_resolver_builtin_defaults(
    self,
    region_name,
    service_name,
    s3_config,
    endpoint_bridge,
    client_endpoint_url,
    legacy_endpoint_url,
    credentials,
    account_id_endpoint_mode,
):
    """Compute default values for the endpoint-ruleset builtins.

    Returns a dict mapping ``EPRBuiltins`` members to the values the
    EndpointRulesetResolver should use.  Note that the AWS_USE_FIPS and
    AWS_USE_DUALSTACK entries deliberately rely on ``and``/``or``
    short-circuiting: a non-None SDK endpoint forces ``False``,
    otherwise the legacy bridge's resolution is used, with a final
    ``or False`` so a ``None`` result also becomes ``False``.
    """
    # EndpointRulesetResolver rulesets may accept an "SDK::Endpoint" as
    # input. If the endpoint_url argument of create_client() is set, it
    # always takes priority.
    if client_endpoint_url:
        given_endpoint = client_endpoint_url
    # If an endpoints.json data file other than the one bundled within
    # the botocore/data directory is used, the output of legacy
    # endpoint resolution is provided to EndpointRulesetResolver.
    elif not endpoint_bridge.resolver_uses_builtin_data():
        given_endpoint = legacy_endpoint_url
    else:
        given_endpoint = None

    # The endpoint rulesets differ from legacy botocore behavior in whether
    # forcing path style addressing in incompatible situations raises an
    # exception or silently ignores the config setting. The
    # AWS_S3_FORCE_PATH_STYLE parameter is adjusted both here and for each
    # operation so that the ruleset behavior is backwards compatible.
    if s3_config.get('use_accelerate_endpoint', False):
        force_path_style = False
    elif client_endpoint_url is not None and not is_s3_accelerate_url(
        client_endpoint_url
    ):
        force_path_style = s3_config.get('addressing_style') != 'virtual'
    else:
        force_path_style = s3_config.get('addressing_style') == 'path'

    return {
        EPRBuiltins.AWS_REGION: region_name,
        EPRBuiltins.AWS_USE_FIPS: (
            # SDK_ENDPOINT cannot be combined with AWS_USE_FIPS
            given_endpoint is None
            # use legacy resolver's _resolve_endpoint_variant_config_var()
            # or default to False if it returns None
            and endpoint_bridge._resolve_endpoint_variant_config_var(
                'use_fips_endpoint'
            )
            or False
        ),
        EPRBuiltins.AWS_USE_DUALSTACK: (
            # SDK_ENDPOINT cannot be combined with AWS_USE_DUALSTACK
            given_endpoint is None
            # use legacy resolver's _resolve_use_dualstack_endpoint() and
            # or default to False if it returns None
            and endpoint_bridge._resolve_use_dualstack_endpoint(
                service_name
            )
            or False
        ),
        EPRBuiltins.AWS_STS_USE_GLOBAL_ENDPOINT: (
            self._should_set_global_sts_endpoint(
                region_name=region_name,
                endpoint_url=None,
                endpoint_config=None,
            )
        ),
        EPRBuiltins.AWS_S3_USE_GLOBAL_ENDPOINT: (
            self._should_force_s3_global(region_name, s3_config)
        ),
        EPRBuiltins.AWS_S3_ACCELERATE: s3_config.get(
            'use_accelerate_endpoint', False
        ),
        EPRBuiltins.AWS_S3_FORCE_PATH_STYLE: force_path_style,
        EPRBuiltins.AWS_S3_USE_ARN_REGION: s3_config.get(
            'use_arn_region', True
        ),
        EPRBuiltins.AWS_S3CONTROL_USE_ARN_REGION: s3_config.get(
            'use_arn_region', False
        ),
        EPRBuiltins.AWS_S3_DISABLE_MRAP: s3_config.get(
            's3_disable_multiregion_access_points', False
        ),
        EPRBuiltins.SDK_ENDPOINT: given_endpoint,
        # Account id resolution is deferred so credentials are only
        # fetched if the ruleset actually needs the value.
        EPRBuiltins.ACCOUNT_ID: credentials.get_deferred_property(
            'account_id'
        )
        if credentials
        else None,
        EPRBuiltins.ACCOUNT_ID_ENDPOINT_MODE: account_id_endpoint_mode,
    }
|
||||
|
||||
def _compute_user_agent_appid_config(self, config_kwargs):
|
||||
user_agent_appid = config_kwargs.get('user_agent_appid')
|
||||
if user_agent_appid is None:
|
||||
user_agent_appid = self._config_store.get_config_variable(
|
||||
'user_agent_appid'
|
||||
)
|
||||
if (
|
||||
user_agent_appid is not None
|
||||
and len(user_agent_appid) > USERAGENT_APPID_MAXLEN
|
||||
):
|
||||
logger.warning(
|
||||
'The configured value for user_agent_appid exceeds the '
|
||||
f'maximum length of {USERAGENT_APPID_MAXLEN} characters.'
|
||||
)
|
||||
config_kwargs['user_agent_appid'] = user_agent_appid
|
||||
|
||||
def _compute_sigv4a_signing_region_set_config(self, config_kwargs):
|
||||
sigv4a_signing_region_set = config_kwargs.get(
|
||||
'sigv4a_signing_region_set'
|
||||
)
|
||||
if sigv4a_signing_region_set is None:
|
||||
sigv4a_signing_region_set = self._config_store.get_config_variable(
|
||||
'sigv4a_signing_region_set'
|
||||
)
|
||||
config_kwargs['sigv4a_signing_region_set'] = sigv4a_signing_region_set
|
||||
|
||||
def _compute_checksum_config(self, config_kwargs):
    """Resolve and validate both flexible-checksum settings."""
    checksum_settings = (
        (
            'request_checksum_calculation',
            VALID_REQUEST_CHECKSUM_CALCULATION_CONFIG,
        ),
        (
            'response_checksum_validation',
            VALID_RESPONSE_CHECKSUM_VALIDATION_CONFIG,
        ),
    )
    for key, options in checksum_settings:
        self._handle_checksum_config(
            config_kwargs, config_key=key, valid_options=options
        )
|
||||
|
||||
def _handle_checksum_config(
|
||||
self,
|
||||
config_kwargs,
|
||||
config_key,
|
||||
valid_options,
|
||||
):
|
||||
value = config_kwargs.get(config_key)
|
||||
if value is None:
|
||||
value = self._config_store.get_config_variable(config_key)
|
||||
|
||||
if isinstance(value, str):
|
||||
value = value.lower()
|
||||
|
||||
if value not in valid_options:
|
||||
raise botocore.exceptions.InvalidChecksumConfigError(
|
||||
config_key=config_key,
|
||||
config_value=value,
|
||||
valid_options=valid_options,
|
||||
)
|
||||
self._register_checksum_config_feature_ids(value, config_key)
|
||||
config_kwargs[config_key] = value
|
||||
|
||||
def _register_checksum_config_feature_ids(self, value, config_key):
|
||||
checksum_config_feature_id = None
|
||||
if config_key == "request_checksum_calculation":
|
||||
checksum_config_feature_id = (
|
||||
f"FLEXIBLE_CHECKSUMS_REQ_{value.upper()}"
|
||||
)
|
||||
elif config_key == "response_checksum_validation":
|
||||
checksum_config_feature_id = (
|
||||
f"FLEXIBLE_CHECKSUMS_RES_{value.upper()}"
|
||||
)
|
||||
if checksum_config_feature_id is not None:
|
||||
register_feature_id(checksum_config_feature_id)
|
||||
|
||||
def _compute_account_id_endpoint_mode_config(self, config_kwargs):
    """Resolve and validate ``account_id_endpoint_mode``.

    Unsigned clients are forced to ``'disabled'``, since there are no
    credentials from which to resolve an account id.
    :raises botocore.exceptions.InvalidConfigError: On an invalid mode.
    """
    config_key = 'account_id_endpoint_mode'

    # Disable account-id based endpoint routing for unsigned requests:
    # there are no credentials to resolve.
    if config_kwargs.get('signature_version') is botocore.UNSIGNED:
        config_kwargs[config_key] = 'disabled'
        return

    mode = config_kwargs.get(config_key)
    if mode is None:
        mode = self._config_store.get_config_variable(config_key)

    # Normalize strings so validation is case-insensitive.
    if isinstance(mode, str):
        mode = mode.lower()

    if mode not in VALID_ACCOUNT_ID_ENDPOINT_MODE_CONFIG:
        raise botocore.exceptions.InvalidConfigError(
            error_msg=f"The configured value '{mode}' for '{config_key}' is "
            f"invalid. Valid values are: {VALID_ACCOUNT_ID_ENDPOINT_MODE_CONFIG}."
        )

    config_kwargs[config_key] = mode
|
||||
|
||||
def _compute_auth_scheme_preference_config(
|
||||
self, client_config, config_kwargs
|
||||
):
|
||||
config_key = 'auth_scheme_preference'
|
||||
set_in_config_object = False
|
||||
|
||||
if client_config and client_config.auth_scheme_preference:
|
||||
value = client_config.auth_scheme_preference
|
||||
set_in_config_object = True
|
||||
else:
|
||||
value = self._config_store.get_config_variable(config_key)
|
||||
|
||||
if value is None:
|
||||
config_kwargs[config_key] = None
|
||||
return
|
||||
|
||||
if not isinstance(value, str):
|
||||
raise botocore.exceptions.InvalidConfigError(
|
||||
error_msg=(
|
||||
f"{config_key} must be a comma-delimited string. "
|
||||
f"Received {type(value)} instead: {value}."
|
||||
)
|
||||
)
|
||||
|
||||
value = ','.join(
|
||||
item.replace(' ', '').replace('\t', '')
|
||||
for item in value.split(',')
|
||||
if item.strip()
|
||||
)
|
||||
|
||||
if set_in_config_object:
|
||||
value = ClientConfigString(value)
|
||||
|
||||
config_kwargs[config_key] = value
|
||||
|
||||
def _compute_signature_version_config(self, client_config, config_kwargs):
|
||||
if client_config and client_config.signature_version:
|
||||
value = client_config.signature_version
|
||||
if isinstance(value, str):
|
||||
config_kwargs['signature_version'] = ClientConfigString(value)
|
||||
|
||||
|
||||
class ConfigObjectWrapper:
    """Base class used to mark values that were set via the in-code
    Config object."""
|
||||
|
||||
|
||||
class ClientConfigString(str, ConfigObjectWrapper):
    """A ``str`` subclass marking a value sourced from the client's
    in-code Config object."""

    def __new__(cls, value=None):
        # Delegate to str's constructor; the subclass only adds the
        # ConfigObjectWrapper marker.
        return str.__new__(cls, value)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,635 @@
|
||||
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
|
||||
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
import functools
|
||||
import logging
|
||||
from collections.abc import Mapping
|
||||
|
||||
import urllib3.util
|
||||
from urllib3.connection import HTTPConnection, VerifiedHTTPSConnection
|
||||
from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool
|
||||
|
||||
import botocore.utils
|
||||
from botocore.compat import (
|
||||
HTTPHeaders,
|
||||
HTTPResponse,
|
||||
MutableMapping,
|
||||
urlencode,
|
||||
urlparse,
|
||||
urlsplit,
|
||||
urlunsplit,
|
||||
)
|
||||
from botocore.exceptions import UnseekableStreamError
|
||||
|
||||
# Module-level logger for this module.
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AWSHTTPResponse(HTTPResponse):
    """HTTPResponse that can replay a status line captured earlier.

    Used in the Expect 100-continue flow: when the server answers with a
    final (non-100) status, the status line has already been read off the
    socket, so it is passed in via the ``status_tuple`` kwarg and served
    from memory on the first ``_read_status`` call.
    """

    # The *args, **kwargs pass-through keeps this agnostic to the exact
    # HTTPResponse constructor signature.
    def __init__(self, *args, **kwargs):
        self._status_tuple = kwargs.pop('status_tuple')
        HTTPResponse.__init__(self, *args, **kwargs)

    def _read_status(self):
        cached = self._status_tuple
        if cached is None:
            return HTTPResponse._read_status(self)
        # Serve the captured status exactly once, then fall back to the
        # normal socket read on subsequent calls.
        self._status_tuple = None
        return cached
|
||||
|
||||
|
||||
class AWSConnection:
    """Mixin for HTTPConnection that supports Expect 100-continue.

    This when mixed with a subclass of httplib.HTTPConnection (though
    technically we subclass from urllib3, which subclasses
    httplib.HTTPConnection) and we only override this class to support Expect
    100-continue, which we need for S3. As far as I can tell, this is
    general purpose enough to not be specific to S3, but I'm being
    tentative and keeping it in botocore because I've only tested
    this against AWS services.

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remember the stock response class so it can be restored after a
        # request that swapped in AWSHTTPResponse (see request()/close()).
        self._original_response_cls = self.response_class
        # This variable is set when we receive an early response from the
        # server. If this value is set to True, any calls to send() are noops.
        # This value is reset to false every time _send_request is called.
        # This is to workaround changes in urllib3 2.0 which uses separate
        # send() calls in request() instead of delegating to endheaders(),
        # which is where the body is sent in CPython's HTTPConnection.
        self._response_received = False
        # True while a request carrying "Expect: 100-continue" is in flight.
        self._expect_header_set = False
        # True once we've logged the "response already received" debug
        # message, so it is only emitted once per request.
        self._send_called = False

    def close(self):
        super().close()
        # Reset all of our instance state we were tracking.
        self._response_received = False
        self._expect_header_set = False
        self._send_called = False
        self.response_class = self._original_response_cls

    def request(self, method, url, body=None, headers=None, *args, **kwargs):
        """Issue a request, tracking whether it uses Expect 100-continue."""
        if headers is None:
            headers = {}
        self._response_received = False
        if headers.get('Expect', b'') == b'100-continue':
            self._expect_header_set = True
        else:
            self._expect_header_set = False
            # Restore the default response class for non-Expect requests.
            self.response_class = self._original_response_cls
        rval = super().request(method, url, body, headers, *args, **kwargs)
        self._expect_header_set = False
        return rval

    def _convert_to_bytes(self, mixed_buffer):
        # Take a list of mixed str/bytes and convert it
        # all into a single bytestring.
        # Any str will be encoded as utf-8.
        bytes_buffer = []
        for chunk in mixed_buffer:
            if isinstance(chunk, str):
                bytes_buffer.append(chunk.encode('utf-8'))
            else:
                bytes_buffer.append(chunk)
        msg = b"\r\n".join(bytes_buffer)
        return msg

    def _send_output(self, message_body=None, *args, **kwargs):
        """Flush the buffered headers and, unless waiting on a 100
        Continue, the message body."""
        self._buffer.extend((b"", b""))
        msg = self._convert_to_bytes(self._buffer)
        del self._buffer[:]
        # If msg and message_body are sent in a single send() call,
        # it will avoid performance problems caused by the interaction
        # between delayed ack and the Nagle algorithm.
        if isinstance(message_body, bytes):
            msg += message_body
            message_body = None
        self.send(msg)
        if self._expect_header_set:
            # This is our custom behavior. If the Expect header was
            # set, it will trigger this custom behavior.
            logger.debug("Waiting for 100 Continue response.")
            # Wait for 1 second for the server to send a response.
            if urllib3.util.wait_for_read(self.sock, 1):
                self._handle_expect_response(message_body)
                return
            else:
                # From the RFC:
                # Because of the presence of older implementations, the
                # protocol allows ambiguous situations in which a client may
                # send "Expect: 100-continue" without receiving either a 417
                # (Expectation Failed) status or a 100 (Continue) status.
                # Therefore, when a client sends this header field to an origin
                # server (possibly via a proxy) from which it has never seen a
                # 100 (Continue) status, the client SHOULD NOT wait for an
                # indefinite period before sending the request body.
                logger.debug(
                    "No response seen from server, continuing to "
                    "send the response body."
                )
        if message_body is not None:
            # message_body was not a string (i.e. it is a file), and
            # we must run the risk of Nagle.
            self.send(message_body)

    def _consume_headers(self, fp):
        # Most servers (including S3) will just return
        # the CLRF after the 100 continue response. However,
        # some servers (I've specifically seen this for squid when
        # used as a straight HTTP proxy) will also inject a
        # Connection: keep-alive header. To account for this
        # we'll read until we read '\r\n', and ignore any headers
        # that come immediately after the 100 continue response.
        current = None
        while current != b'\r\n':
            current = fp.readline()

    def _handle_expect_response(self, message_body):
        # This is called when we sent the request headers containing
        # an Expect: 100-continue header and received a response.
        # We now need to figure out what to do.
        fp = self.sock.makefile('rb', 0)
        try:
            maybe_status_line = fp.readline()
            parts = maybe_status_line.split(None, 2)
            if self._is_100_continue_status(maybe_status_line):
                self._consume_headers(fp)
                logger.debug(
                    "100 Continue response seen, now sending request body."
                )
                self._send_message_body(message_body)
            elif len(parts) == 3 and parts[0].startswith(b'HTTP/'):
                # From the RFC:
                # Requirements for HTTP/1.1 origin servers:
                #
                # - Upon receiving a request which includes an Expect
                #   request-header field with the "100-continue"
                #   expectation, an origin server MUST either respond with
                #   100 (Continue) status and continue to read from the
                #   input stream, or respond with a final status code.
                #
                # So if we don't get a 100 Continue response, then
                # whatever the server has sent back is the final response
                # and don't send the message_body.
                logger.debug(
                    "Received a non 100 Continue response "
                    "from the server, NOT sending request body."
                )
                # Re-serve the already-consumed status line through
                # AWSHTTPResponse so the normal response machinery works.
                status_tuple = (
                    parts[0].decode('ascii'),
                    int(parts[1]),
                    parts[2].decode('ascii'),
                )
                response_class = functools.partial(
                    AWSHTTPResponse, status_tuple=status_tuple
                )
                self.response_class = response_class
                self._response_received = True
        finally:
            fp.close()

    def _send_message_body(self, message_body):
        if message_body is not None:
            self.send(message_body)

    def send(self, str):
        # NOTE: the parameter name shadows the builtin `str`; kept as-is
        # to preserve the established call signature.
        if self._response_received:
            if not self._send_called:
                # urllib3 2.0 chunks and calls send potentially
                # thousands of times inside `request` unlike the
                # standard library. Only log this once for sanity.
                logger.debug(
                    "send() called, but response already received. "
                    "Not sending data."
                )
            self._send_called = True
            return
        return super().send(str)

    def _is_100_continue_status(self, maybe_status_line):
        parts = maybe_status_line.split(None, 2)
        # Check for HTTP/<version> 100 Continue\r\n
        return (
            len(parts) >= 3
            and parts[0].startswith(b'HTTP/')
            and parts[1] == b'100'
        )
|
||||
|
||||
|
||||
class AWSHTTPConnection(AWSConnection, HTTPConnection):
    """An HTTPConnection that supports 100 Continue behavior.

    Combines urllib3's HTTPConnection with the AWSConnection mixin's
    Expect: 100-continue handling.
    """
|
||||
|
||||
|
||||
class AWSHTTPSConnection(AWSConnection, VerifiedHTTPSConnection):
    """An HTTPSConnection that supports 100 Continue behavior.

    Combines urllib3's VerifiedHTTPSConnection with the AWSConnection
    mixin's Expect: 100-continue handling.
    """
|
||||
|
||||
|
||||
class AWSHTTPConnectionPool(HTTPConnectionPool):
    # A urllib3 HTTP pool whose connections are AWSHTTPConnection, so
    # pooled connections get the Expect 100-continue behavior.
    ConnectionCls = AWSHTTPConnection
|
||||
|
||||
|
||||
class AWSHTTPSConnectionPool(HTTPSConnectionPool):
    # A urllib3 HTTPS pool whose connections are AWSHTTPSConnection, so
    # pooled connections get the Expect 100-continue behavior.
    ConnectionCls = AWSHTTPSConnection
|
||||
|
||||
|
||||
def prepare_request_dict(
    request_dict, endpoint_url, context=None, user_agent=None
):
    """
    Prepare a request dict so it can be turned into an AWSRequest.

    Adds the final ``url`` (endpoint + path + host prefix + encoded
    query string), optionally the ``User-Agent`` header, and a
    ``context`` dict.

    :type request_dict: dict
    :param request_dict: The request dict (created from the
        ``serialize`` module).

    :type user_agent: string
    :param user_agent: The user agent to use for this request.

    :type endpoint_url: string
    :param endpoint_url: The full endpoint url, which contains at least
        the scheme, the hostname, and optionally any path components.
    """
    if user_agent is not None:
        request_dict['headers']['User-Agent'] = user_agent

    url = _urljoin(
        endpoint_url, request_dict['url_path'], request_dict.get('host_prefix')
    )
    query_string = request_dict['query_string']
    if query_string:
        # NOTE: botocore.utils is referenced via the module attribute to
        # avoid a circular import with utils, without moving classes
        # between modules (which would be a breaking change).
        encoded = botocore.utils.percent_encode_sequence(query_string)
        separator = '&' if '?' in url else '?'
        url = f'{url}{separator}{encoded}'
    request_dict['url'] = url
    request_dict['context'] = {} if context is None else context
|
||||
|
||||
|
||||
def create_request_object(request_dict):
    """
    Create an AWSRequest object from a prepared request dict.

    :type request_dict: dict
    :param request_dict: The request dict (created from the
        ``prepare_request_dict`` method).

    :rtype: ``botocore.awsrequest.AWSRequest``
    :return: An AWSRequest object based on the request_dict.
    """
    request = AWSRequest(
        method=request_dict['method'],
        url=request_dict['url'],
        data=request_dict['body'],
        headers=request_dict['headers'],
        auth_path=request_dict.get('auth_path'),
    )
    request.context = request_dict['context']
    return request
|
||||
|
||||
|
||||
def _urljoin(endpoint_url, url_path, host_prefix):
|
||||
p = urlsplit(endpoint_url)
|
||||
# <part> - <index>
|
||||
# scheme - p[0]
|
||||
# netloc - p[1]
|
||||
# path - p[2]
|
||||
# query - p[3]
|
||||
# fragment - p[4]
|
||||
if not url_path or url_path == '/':
|
||||
# If there's no path component, ensure the URL ends with
|
||||
# a '/' for backwards compatibility.
|
||||
if not p[2]:
|
||||
new_path = '/'
|
||||
else:
|
||||
new_path = p[2]
|
||||
elif p[2].endswith('/') and url_path.startswith('/'):
|
||||
new_path = p[2][:-1] + url_path
|
||||
else:
|
||||
new_path = p[2] + url_path
|
||||
|
||||
new_netloc = p[1]
|
||||
if host_prefix is not None:
|
||||
new_netloc = host_prefix + new_netloc
|
||||
|
||||
reconstructed = urlunsplit((p[0], new_netloc, new_path, p[3], p[4]))
|
||||
return reconstructed
|
||||
|
||||
|
||||
class AWSRequestPreparer:
    """
    This class performs preparation on AWSRequest objects similar to that of
    the PreparedRequest class does in the requests library. However, the logic
    has been boiled down to meet the specific use cases in botocore. Of note
    there are the following differences:
    This class does not heavily prepare the URL. Requests performed many
    validations and corrections to ensure the URL is properly formatted.
    Botocore either performs these validations elsewhere or otherwise
    consistently provides well formatted URLs.

    This class does not heavily prepare the body. Body preperation is
    simple and supports only the cases that we document: bytes and
    file-like objects to determine the content-length. This will also
    additionally prepare a body that is a dict to be url encoded params
    string as some signers rely on this. Finally, this class does not
    support multipart file uploads.

    This class does not prepare the method, auth or cookies.
    """

    def prepare(self, original):
        """Build an AWSPreparedRequest from *original*."""
        method = original.method
        prepared_url = self._prepare_url(original)
        prepared_body = self._prepare_body(original)
        prepared_headers = self._prepare_headers(original, prepared_body)
        stream_output = original.stream_output

        return AWSPreparedRequest(
            method, prepared_url, prepared_headers, prepared_body,
            stream_output,
        )

    def _prepare_url(self, original):
        """Append any ``params`` to the URL as an encoded query string."""
        url = original.url
        if not original.params:
            return url
        if isinstance(original.params, Mapping):
            items = list(original.params.items())
        else:
            items = original.params
        encoded = urlencode(items, doseq=True)
        # Append with '&' if the URL already carries a query string.
        separator = '&' if urlparse(url).query else '?'
        return f'{url}{separator}{encoded}'

    def _prepare_headers(self, original, prepared_body=None):
        """Build the headers, adding Content-Length or Transfer-Encoding
        when the request method expects a body and neither is set."""
        headers = HeadersDict(original.headers.items())

        # Respect an explicitly provided framing header.
        if 'Transfer-Encoding' in headers or 'Content-Length' in headers:
            return headers

        # These methods are not expected to carry a body.
        if original.method in ('GET', 'HEAD', 'OPTIONS'):
            return headers

        length = self._determine_content_length(prepared_body)
        if length is None:
            # Failed to determine content length, fall back to chunked.
            # NOTE: This shouldn't ever happen in practice.
            logger.debug(
                'Failed to determine length of %s', type(prepared_body)
            )
            headers['Transfer-Encoding'] = 'chunked'
        else:
            headers['Content-Length'] = str(length)
        return headers

    def _to_utf8(self, item):
        """Return (key, value) with any str members encoded as utf-8."""
        return tuple(
            part.encode('utf-8') if isinstance(part, str) else part
            for part in item
        )

    def _prepare_body(self, original):
        """Prepares the given HTTP body data."""
        body = original.data
        if body == b'':
            body = None

        if isinstance(body, dict):
            # Some signers rely on a dict body being url-encoded.
            body = urlencode(
                [self._to_utf8(item) for item in body.items()], doseq=True
            )

        return body

    def _determine_content_length(self, body):
        return botocore.utils.determine_content_length(body)
|
||||
|
||||
|
||||
class AWSRequest:
    """Represents the elements of an HTTP request.

    This class was originally inspired by requests.models.Request, but has been
    boiled down to meet the specific use cases in botocore. That being said this
    class (even in requests) is effectively a named-tuple.
    """

    _REQUEST_PREPARER_CLS = AWSRequestPreparer

    def __init__(
        self,
        method=None,
        url=None,
        headers=None,
        data=None,
        params=None,
        auth_path=None,
        stream_output=False,
    ):
        self._request_preparer = self._REQUEST_PREPARER_CLS()

        self.method = method
        self.url = url
        self.data = data
        # Default to an empty dict so callers can always treat params
        # as a mapping.
        self.params = params if params is not None else {}
        self.auth_path = auth_path
        self.stream_output = stream_output

        # Copy headers in one at a time so they go through HTTPHeaders'
        # item assignment.
        self.headers = HTTPHeaders()
        if headers is not None:
            for key, value in headers.items():
                self.headers[key] = value

        # This is a dictionary to hold information that is used when
        # processing the request. What is inside of ``context`` is open-ended.
        # For example, it may have a timestamp key that is used for holding
        # what the timestamp is when signing the request. Note that none
        # of the information that is inside of ``context`` is directly
        # sent over the wire; the information is only used to assist in
        # creating what is sent over the wire.
        self.context = {}

    def prepare(self):
        """Constructs a :class:`AWSPreparedRequest <AWSPreparedRequest>`."""
        return self._request_preparer.prepare(self)

    @property
    def body(self):
        # The prepared body; str results are normalized to utf-8 bytes.
        prepared = self.prepare().body
        if isinstance(prepared, str):
            return prepared.encode('utf-8')
        return prepared
|
||||
|
||||
|
||||
class AWSPreparedRequest:
    """A data class representing a finalized request to be sent over the wire.

    Requests at this stage should be treated as final, and the properties of
    the request should not be modified.

    :ivar method: The HTTP Method
    :ivar url: The full url
    :ivar headers: The HTTP headers to send.
    :ivar body: The HTTP body.
    :ivar stream_output: If the response for this request should be streamed.
    """

    def __init__(self, method, url, headers, body, stream_output):
        self.method = method
        self.url = url
        self.headers = headers
        self.body = body
        self.stream_output = stream_output

    def __repr__(self):
        # Body is deliberately omitted: it may be large or a stream.
        fmt = (
            '<AWSPreparedRequest stream_output=%s, method=%s, url=%s, '
            'headers=%s>'
        )
        return fmt % (self.stream_output, self.method, self.url, self.headers)

    def reset_stream(self):
        """Resets the streaming body to its initial position.

        If the request contains a streaming body (a streamable file-like object)
        seek to the object's initial position to ensure the entire contents of
        the object is sent. This is a no-op for static bytes-like body types.

        :raises UnseekableStreamError: If the body is a stream that cannot
            be rewound.
        """
        # Trying to reset a stream when there is a no stream will
        # just immediately return. It's not an error, it will produce
        # the same result as if we had actually reset the stream (we'll send
        # the entire body contents again if we need to).
        # Same case if the body is a string/bytes/bytearray type.

        non_seekable_types = (bytes, str, bytearray)
        if self.body is None or isinstance(self.body, non_seekable_types):
            return
        try:
            logger.debug("Rewinding stream: %s", self.body)
            self.body.seek(0)
        except Exception as e:
            logger.debug("Unable to rewind stream: %s", e)
            # Surface a typed error so callers know a retry cannot resend
            # the body.
            raise UnseekableStreamError(stream_object=self.body)
|
||||
|
||||
|
||||
class AWSResponse:
    """A data class representing an HTTP response.

    This class was originally inspired by requests.models.Response, but has
    been boiled down to meet the specific use cases in botocore. This has
    effectively been reduced to a named tuple.

    :ivar url: The full url.
    :ivar status_code: The status code of the HTTP response.
    :ivar headers: The HTTP headers received.
    :ivar body: The HTTP response body.
    """

    def __init__(self, url, status_code, headers, raw):
        self.url = url
        self.status_code = status_code
        self.headers = HeadersDict(headers)
        self.raw = raw

        # Lazily-populated cache for the fully-read response body.
        self._content = None

    @property
    def content(self):
        """Content of the response as bytes."""

        if self._content is None:
            # Read the contents.
            # NOTE: requests would attempt to call stream and fall back
            # to a custom generator that would call read in a loop, but
            # we don't rely on this behavior
            self._content = b''.join(self.raw.stream()) or b''

        return self._content

    @property
    def text(self):
        """Content of the response as a proper text type.

        Uses the encoding type provided in the response headers to decode the
        response content into a proper text type. If the encoding is not
        present in the headers, UTF-8 is used as a default.
        """
        encoding = botocore.utils.get_encoding_from_headers(self.headers)
        if encoding:
            return self.content.decode(encoding)
        else:
            return self.content.decode('utf-8')
|
||||
|
||||
|
||||
class _HeaderKey:
|
||||
def __init__(self, key):
|
||||
self._key = key
|
||||
self._lower = key.lower()
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self._lower)
|
||||
|
||||
def __eq__(self, other):
|
||||
return isinstance(other, _HeaderKey) and self._lower == other._lower
|
||||
|
||||
def __str__(self):
|
||||
return self._key
|
||||
|
||||
def __repr__(self):
|
||||
return repr(self._key)
|
||||
|
||||
|
||||
class HeadersDict(MutableMapping):
    """A case-insensitive dictionary to represent HTTP headers."""

    def __init__(self, *args, **kwargs):
        # Keys are stored wrapped in _HeaderKey so lookups ignore case
        # while the original spelling is preserved for iteration.
        self._dict = {}
        self.update(*args, **kwargs)

    def __setitem__(self, key, value):
        self._dict[_HeaderKey(key)] = value

    def __getitem__(self, key):
        return self._dict[_HeaderKey(key)]

    def __delitem__(self, key):
        del self._dict[_HeaderKey(key)]

    def __iter__(self):
        # Yield the original-cased header names, not the wrapper objects.
        return (str(key) for key in self._dict)

    def __len__(self):
        return len(self._dict)

    def __repr__(self):
        return repr(self._dict)

    def copy(self):
        """Return a shallow copy as a new HeadersDict."""
        return HeadersDict(self.items())
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,357 @@
|
||||
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
import copy
|
||||
import datetime
|
||||
import sys
|
||||
import inspect
|
||||
import warnings
|
||||
import hashlib
|
||||
from http.client import HTTPMessage
|
||||
import logging
|
||||
import shlex
|
||||
import re
|
||||
import os
|
||||
from collections import OrderedDict
|
||||
from collections.abc import MutableMapping
|
||||
from math import floor
|
||||
|
||||
from botocore.vendored import six
|
||||
from botocore.exceptions import MD5UnavailableError
|
||||
from dateutil.tz import tzlocal
|
||||
from urllib3 import exceptions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class HTTPHeaders(HTTPMessage):
    # Thin alias over http.client.HTTPMessage kept for botocore's public
    # compat surface; alternate constructors (from_dict/from_pairs) are
    # attached to this class later in this module.
    pass
|
||||
|
||||
from urllib.parse import (
|
||||
quote,
|
||||
urlencode,
|
||||
unquote,
|
||||
unquote_plus,
|
||||
urlparse,
|
||||
urlsplit,
|
||||
urlunsplit,
|
||||
urljoin,
|
||||
parse_qsl,
|
||||
parse_qs,
|
||||
)
|
||||
from http.client import HTTPResponse
|
||||
from io import IOBase as _IOBase
|
||||
from base64 import encodebytes
|
||||
from email.utils import formatdate
|
||||
from itertools import zip_longest
|
||||
file_type = _IOBase
|
||||
zip = zip
|
||||
|
||||
# In python3, unquote takes a str() object, url decodes it,
|
||||
# then takes the bytestring and decodes it to utf-8.
|
||||
unquote_str = unquote_plus
|
||||
|
||||
def set_socket_timeout(http_response, timeout):
    """Set the timeout of the socket from an HTTPResponse.

    :param http_response: An instance of ``httplib.HTTPResponse``

    """
    # NOTE(review): reaches through private attributes down to the raw
    # socket; this chain is specific to the current urllib3/httplib
    # internals and will break if those internals change.
    http_response._fp.fp.raw._sock.settimeout(timeout)
|
||||
|
||||
def accepts_kwargs(func):
    """Return a truthy value if ``func`` accepts ``**kwargs``.

    :param func: The function to inspect.
    :return: The name of the ``**kwargs`` parameter (e.g. ``'kwargs'``)
        when one is present, otherwise ``None`` — so the result can be
        used in a boolean context, exactly as the previous index-based
        lookup behaved.
    """
    # ``varkw`` is the named field for the old opaque ``[2]`` index into
    # the FullArgSpec tuple.
    return inspect.getfullargspec(func).varkw
|
||||
|
||||
def ensure_unicode(s, encoding=None, errors=None):
    """Return ``s`` unchanged.

    On Python 3 every ``str`` is already unicode, so this is a no-op kept
    only for backwards compatibility; ``encoding`` and ``errors`` are
    accepted but ignored.
    """
    return s
|
||||
|
||||
def ensure_bytes(s, encoding='utf-8', errors='strict'):
    """Coerce ``s`` to ``bytes``.

    ``bytes`` input is returned unchanged; ``str`` input is encoded with
    the given ``encoding`` and ``errors`` policy.

    :raises ValueError: If ``s`` is neither ``str`` nor ``bytes``.
    """
    if isinstance(s, bytes):
        return s
    if isinstance(s, str):
        return s.encode(encoding, errors)
    raise ValueError(f"Expected str or bytes, received {type(s)}.")
|
||||
|
||||
|
||||
import xml.etree.ElementTree as ETree
|
||||
XMLParseError = ETree.ParseError
|
||||
|
||||
import json
|
||||
|
||||
|
||||
def filter_ssl_warnings():
    """Suppress urllib3's InsecurePlatformWarning about missing SSLContext."""
    # Ignore warnings related to SNI as it is not being used in validations.
    warnings.filterwarnings(
        'ignore',
        message="A true SSLContext object is not available.*",
        category=exceptions.InsecurePlatformWarning,
        module=r".*urllib3\.util\.ssl_",
    )
|
||||
|
||||
|
||||
@classmethod
def from_dict(cls, d):
    """Alternate constructor: build a headers instance from a mapping of
    header name -> value. Attached to HTTPHeaders below.
    """
    new_instance = cls()
    for key, value in d.items():
        new_instance[key] = value
    return new_instance
|
||||
|
||||
|
||||
@classmethod
def from_pairs(cls, pairs):
    """Alternate constructor: build a headers instance from an iterable of
    ``(name, value)`` pairs. Attached to HTTPHeaders below.
    """
    new_instance = cls()
    for key, value in pairs:
        new_instance[key] = value
    return new_instance
|
||||
|
||||
|
||||
# Attach the alternate constructors defined above to HTTPHeaders.
HTTPHeaders.from_dict = from_dict
HTTPHeaders.from_pairs = from_pairs
|
||||
|
||||
|
||||
def copy_kwargs(kwargs):
    """Return a shallow copy of ``kwargs``.

    This used to be a compat shim for 2.6 but is now just an alias.
    """
    return copy.copy(kwargs)
|
||||
|
||||
|
||||
def total_seconds(delta):
    """Return the total number of seconds in a ``datetime.timedelta``.

    This used to be a compat shim for 2.6 but is now just an alias.

    :param delta: The timedelta object
    :type delta: ``datetime.timedelta``
    """
    return delta.total_seconds()
|
||||
|
||||
|
||||
# Checks to see if md5 is available on this system. A given system might not
# have access to it for various reasons, such as FIPS mode being enabled.
try:
    # ``usedforsecurity=False`` is rejected with ValueError when MD5 is
    # blocked, and with AttributeError on builds lacking the keyword.
    hashlib.md5(usedforsecurity=False)
    MD5_AVAILABLE = True
except (AttributeError, ValueError):
    MD5_AVAILABLE = False
|
||||
|
||||
|
||||
def get_md5(*args, **kwargs):
    """
    Attempts to get an md5 hashing object.

    :param args: Args to pass to the MD5 constructor
    :param kwargs: Key word arguments to pass to the MD5 constructor
    :return: An MD5 hashing object if available.
    :raises MD5UnavailableError: If MD5 support is unavailable on this
        system (for example when FIPS mode is enabled).
    """
    if MD5_AVAILABLE:
        return hashlib.md5(*args, **kwargs)
    else:
        raise MD5UnavailableError()
|
||||
|
||||
|
||||
def compat_shell_split(s, platform=None):
    """Split a command string using the rules of the given platform.

    :param s: The command string to split.
    :param platform: A ``sys.platform``-style identifier; defaults to the
        running interpreter's platform.
    :return: A list of command components.
    """
    target = sys.platform if platform is None else platform
    if target == "win32":
        return _windows_shell_split(s)
    return shlex.split(s)
|
||||
|
||||
|
||||
def _windows_shell_split(s):
|
||||
"""Splits up a windows command as the built-in command parser would.
|
||||
|
||||
Windows has potentially bizarre rules depending on where you look. When
|
||||
spawning a process via the Windows C runtime (which is what python does
|
||||
when you call popen) the rules are as follows:
|
||||
|
||||
https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
|
||||
|
||||
To summarize:
|
||||
|
||||
* Only space and tab are valid delimiters
|
||||
* Double quotes are the only valid quotes
|
||||
* Backslash is interpreted literally unless it is part of a chain that
|
||||
leads up to a double quote. Then the backslashes escape the backslashes,
|
||||
and if there is an odd number the final backslash escapes the quote.
|
||||
|
||||
:param s: The command string to split up into parts.
|
||||
:return: A list of command components.
|
||||
"""
|
||||
if not s:
|
||||
return []
|
||||
|
||||
components = []
|
||||
buff = []
|
||||
is_quoted = False
|
||||
num_backslashes = 0
|
||||
for character in s:
|
||||
if character == '\\':
|
||||
# We can't simply append backslashes because we don't know if
|
||||
# they are being used as escape characters or not. Instead we
|
||||
# keep track of how many we've encountered and handle them when
|
||||
# we encounter a different character.
|
||||
num_backslashes += 1
|
||||
elif character == '"':
|
||||
if num_backslashes > 0:
|
||||
# The backslashes are in a chain leading up to a double
|
||||
# quote, so they are escaping each other.
|
||||
buff.append('\\' * int(floor(num_backslashes / 2)))
|
||||
remainder = num_backslashes % 2
|
||||
num_backslashes = 0
|
||||
if remainder == 1:
|
||||
# The number of backslashes is uneven, so they are also
|
||||
# escaping the double quote, so it needs to be added to
|
||||
# the current component buffer.
|
||||
buff.append('"')
|
||||
continue
|
||||
|
||||
# We've encountered a double quote that is not escaped,
|
||||
# so we toggle is_quoted.
|
||||
is_quoted = not is_quoted
|
||||
|
||||
# If there are quotes, then we may want an empty string. To be
|
||||
# safe, we add an empty string to the buffer so that we make
|
||||
# sure it sticks around if there's nothing else between quotes.
|
||||
# If there is other stuff between quotes, the empty string will
|
||||
# disappear during the joining process.
|
||||
buff.append('')
|
||||
elif character in [' ', '\t'] and not is_quoted:
|
||||
# Since the backslashes aren't leading up to a quote, we put in
|
||||
# the exact number of backslashes.
|
||||
if num_backslashes > 0:
|
||||
buff.append('\\' * num_backslashes)
|
||||
num_backslashes = 0
|
||||
|
||||
# Excess whitespace is ignored, so only add the components list
|
||||
# if there is anything in the buffer.
|
||||
if buff:
|
||||
components.append(''.join(buff))
|
||||
buff = []
|
||||
else:
|
||||
# Since the backslashes aren't leading up to a quote, we put in
|
||||
# the exact number of backslashes.
|
||||
if num_backslashes > 0:
|
||||
buff.append('\\' * num_backslashes)
|
||||
num_backslashes = 0
|
||||
buff.append(character)
|
||||
|
||||
# Quotes must be terminated.
|
||||
if is_quoted:
|
||||
raise ValueError(f"No closing quotation in string: {s}")
|
||||
|
||||
# There may be some leftover backslashes, so we need to add them in.
|
||||
# There's no quote so we add the exact number.
|
||||
if num_backslashes > 0:
|
||||
buff.append('\\' * num_backslashes)
|
||||
|
||||
# Add the final component in if there is anything in the buffer.
|
||||
if buff:
|
||||
components.append(''.join(buff))
|
||||
|
||||
return components
|
||||
|
||||
|
||||
def get_tzinfo_options():
    """Return a tuple of tzinfo factories to try for the local timezone."""
    # Due to dateutil/dateutil#197, Windows may fail to parse times in the past
    # with the system clock. We can alternatively fallback to tzwininfo when
    # this happens, which will get time info from the Windows registry.
    if sys.platform == 'win32':
        # Imported lazily: tzwinlocal exists only on Windows installs.
        from dateutil.tz import tzwinlocal

        return (tzlocal, tzwinlocal)
    else:
        return (tzlocal,)
|
||||
|
||||
|
||||
# Detect if CRT is available for use
try:
    import awscrt.auth

    # Allow user opt-out if needed. Compared case-insensitively; any value
    # other than "true" leaves CRT enabled.
    disabled = os.environ.get('BOTO_DISABLE_CRT', "false")
    HAS_CRT = not disabled.lower() == 'true'
except ImportError:
    HAS_CRT = False
|
||||
|
||||
|
||||
def has_minimum_crt_version(minimum_version):
    """Not intended for use outside botocore."""
    if not HAS_CRT:
        return False

    try:
        # Parse "X.Y.Z" into a tuple of ints for lexicographic comparison;
        # any non-numeric component means we can't compare reliably.
        parsed = tuple(int(part) for part in awscrt.__version__.split("."))
    except (TypeError, ValueError):
        return False

    return parsed >= minimum_version
|
||||
|
||||
|
||||
########################################################
#                urllib3 compat backports              #
########################################################

# Vendoring IPv6 validation regex patterns from urllib3
# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
IPV4_RE = re.compile("^" + IPV4_PAT + "$")
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
# Each variation below covers one legal placement of the "::" shorthand
# from the RFC 3986 IPv6address grammar.
_variations = [
    # 6( h16 ":" ) ls32
    "(?:%(hex)s:){6}%(ls32)s",
    # "::" 5( h16 ":" ) ls32
    "::(?:%(hex)s:){5}%(ls32)s",
    # [ h16 ] "::" 4( h16 ":" ) ls32
    "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
    # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
    "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
    # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
    "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
    # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
    "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
    # [ *4( h16 ":" ) h16 ] "::" ls32
    "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
    # [ *5( h16 ":" ) h16 ] "::" h16
    "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
    # [ *6( h16 ":" ) h16 ] "::"
    "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]

UNRESERVED_PAT = (
    r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
)
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")

# These are the characters that are stripped by post-bpo-43882 urlparse().
UNSAFE_URL_CHARS = frozenset('\t\r\n')

# Detect if gzip is available for use
try:
    import gzip
    HAS_GZIP = True
except ImportError:
    HAS_GZIP = False
|
||||
@@ -0,0 +1,128 @@
|
||||
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
"""
|
||||
NOTE: All functions in this module are considered private and are
|
||||
subject to abrupt breaking changes. Please do not use them directly.
|
||||
|
||||
"""
|
||||
|
||||
import io
|
||||
import logging
|
||||
from gzip import GzipFile
|
||||
from gzip import compress as gzip_compress
|
||||
|
||||
from botocore.compat import urlencode
|
||||
from botocore.useragent import register_feature_id
|
||||
from botocore.utils import determine_content_length
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def maybe_compress_request(config, request_dict, operation_model):
    """Attempt to compress the request body using the modeled encodings.

    Mutates ``request_dict`` in place: replaces ``body`` with its compressed
    form and records the encoding in the ``Content-Encoding`` header.
    """
    if _should_compress_request(config, request_dict, operation_model):
        # Encodings are tried in the order the service model lists them;
        # the first encoding we have an encoder for wins.
        for encoding in operation_model.request_compression['encodings']:
            encoder = COMPRESSION_MAPPING.get(encoding)
            if encoder is not None:
                logger.debug('Compressing request with %s encoding.', encoding)
                request_dict['body'] = encoder(request_dict['body'])
                _set_compression_header(request_dict['headers'], encoding)
                return
            else:
                logger.debug('Unsupported compression encoding: %s', encoding)
|
||||
|
||||
|
||||
def _should_compress_request(config, request_dict, operation_model):
    """Decide whether the request body is eligible for compression."""
    # Guard clauses: compression must not be disabled, must be compatible
    # with the signature version, and must be modeled for this operation.
    if config.disable_request_compression is True:
        return False
    if config.signature_version == 'v2':
        return False
    if operation_model.request_compression is None:
        return False

    if not _is_compressible_type(request_dict):
        logger.debug(
            'Body type %s does not support compression.',
            type(request_dict['body']),
        )
        return False

    if operation_model.has_streaming_input:
        # Streaming inputs are compressed regardless of size unless the
        # operation requires a known content length.
        metadata = operation_model.get_streaming_input().metadata
        return 'requiresLength' not in metadata

    # Non-streaming bodies are compressed only above the configured
    # minimum size threshold.
    return config.request_min_compression_size_bytes <= _get_body_size(
        request_dict['body']
    )
|
||||
|
||||
|
||||
def _is_compressible_type(request_dict):
|
||||
body = request_dict['body']
|
||||
# Coerce dict to a format compatible with compression.
|
||||
if isinstance(body, dict):
|
||||
body = urlencode(body, doseq=True, encoding='utf-8').encode('utf-8')
|
||||
request_dict['body'] = body
|
||||
is_supported_type = isinstance(body, (str, bytes, bytearray))
|
||||
return is_supported_type or hasattr(body, 'read')
|
||||
|
||||
|
||||
def _get_body_size(body):
    """Return the length of ``body`` in bytes, or 0 when it can't be
    determined (which effectively skips compression for that body).
    """
    size = determine_content_length(body)
    if size is not None:
        return size
    logger.debug(
        'Unable to get length of the request body: %s. '
        'Skipping compression.',
        body,
    )
    return 0
|
||||
|
||||
|
||||
def _gzip_compress_body(body):
    """Gzip-compress ``body``.

    Accepts str (encoded as UTF-8), bytes/bytearray, or a readable
    file-like object. Returns compressed bytes for string/bytes input and
    a file-like object of compressed data for file-like input.
    """
    register_feature_id('GZIP_REQUEST_COMPRESSION')
    if isinstance(body, str):
        return gzip_compress(body.encode('utf-8'))
    elif isinstance(body, (bytes, bytearray)):
        return gzip_compress(body)
    elif hasattr(body, 'read'):
        if hasattr(body, 'seek') and hasattr(body, 'tell'):
            # Remember and restore the stream position so compression
            # doesn't disturb the caller's view of the original body.
            current_position = body.tell()
            compressed_obj = _gzip_compress_fileobj(body)
            body.seek(current_position)
            return compressed_obj
        return _gzip_compress_fileobj(body)
|
||||
|
||||
|
||||
def _gzip_compress_fileobj(body):
|
||||
compressed_obj = io.BytesIO()
|
||||
with GzipFile(fileobj=compressed_obj, mode='wb') as gz:
|
||||
while True:
|
||||
chunk = body.read(8192)
|
||||
if not chunk:
|
||||
break
|
||||
if isinstance(chunk, str):
|
||||
chunk = chunk.encode('utf-8')
|
||||
gz.write(chunk)
|
||||
compressed_obj.seek(0)
|
||||
return compressed_obj
|
||||
|
||||
|
||||
def _set_compression_header(headers, encoding):
|
||||
ce_header = headers.get('Content-Encoding')
|
||||
if ce_header is None:
|
||||
headers['Content-Encoding'] = encoding
|
||||
else:
|
||||
headers['Content-Encoding'] = f'{ce_header},{encoding}'
|
||||
|
||||
|
||||
COMPRESSION_MAPPING = {'gzip': _gzip_compress_body}
|
||||
@@ -0,0 +1,475 @@
|
||||
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
import copy
|
||||
|
||||
from botocore.compat import OrderedDict
|
||||
from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
|
||||
from botocore.exceptions import (
|
||||
InvalidMaxRetryAttemptsError,
|
||||
InvalidRetryConfigurationError,
|
||||
InvalidRetryModeError,
|
||||
InvalidS3AddressingStyleError,
|
||||
)
|
||||
|
||||
|
||||
class Config:
|
||||
"""Advanced configuration for Botocore clients.
|
||||
|
||||
:type region_name: str
|
||||
:param region_name: The region to use in instantiating the client
|
||||
|
||||
:type signature_version: str
|
||||
:param signature_version: The signature version when signing requests.
|
||||
|
||||
:type user_agent: str
|
||||
:param user_agent: The value to use in the User-Agent header.
|
||||
|
||||
:type user_agent_extra: str
|
||||
:param user_agent_extra: The value to append to the current User-Agent
|
||||
header value.
|
||||
|
||||
:type user_agent_appid: str
|
||||
:param user_agent_appid: A value that gets included in the User-Agent
|
||||
string in the format "app/<user_agent_appid>". Allowed characters are
|
||||
ASCII alphanumerics and ``!$%&'*+-.^_`|~``. All other characters will
|
||||
be replaced by a ``-``.
|
||||
|
||||
:type connect_timeout: float or int
|
||||
:param connect_timeout: The time in seconds till a timeout exception is
|
||||
thrown when attempting to make a connection. The default is 60
|
||||
seconds.
|
||||
|
||||
:type read_timeout: float or int
|
||||
:param read_timeout: The time in seconds till a timeout exception is
|
||||
thrown when attempting to read from a connection. The default is
|
||||
60 seconds.
|
||||
|
||||
:type parameter_validation: bool
|
||||
:param parameter_validation: Whether parameter validation should occur
|
||||
when serializing requests. The default is True. You can disable
|
||||
parameter validation for performance reasons. Otherwise, it's
|
||||
recommended to leave parameter validation enabled.
|
||||
|
||||
:type max_pool_connections: int
|
||||
:param max_pool_connections: The maximum number of connections to
|
||||
keep in a connection pool. If this value is not set, the default
|
||||
value of 10 is used.
|
||||
|
||||
:type proxies: dict
|
||||
:param proxies: A dictionary of proxy servers to use by protocol or
|
||||
endpoint, e.g.:
|
||||
``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``.
|
||||
The proxies are used on each request.
|
||||
|
||||
:type proxies_config: dict
|
||||
:param proxies_config: A dictionary of additional proxy configurations.
|
||||
Valid keys are:
|
||||
|
||||
* ``proxy_ca_bundle`` -- The path to a custom certificate bundle to use
|
||||
when establishing SSL/TLS connections with proxy.
|
||||
|
||||
* ``proxy_client_cert`` -- The path to a certificate for proxy
|
||||
TLS client authentication.
|
||||
|
||||
When a string is provided it is treated as a path to a proxy client
|
||||
certificate. When a two element tuple is provided, it will be
|
||||
interpreted as the path to the client certificate, and the path
|
||||
to the certificate key.
|
||||
|
||||
* ``proxy_use_forwarding_for_https`` -- For HTTPS proxies,
|
||||
forward your requests to HTTPS destinations with an absolute
|
||||
URI. We strongly recommend you only use this option with
|
||||
trusted or corporate proxies. Value must be boolean.
|
||||
|
||||
:type s3: dict
|
||||
:param s3: A dictionary of S3 specific configurations.
|
||||
Valid keys are:
|
||||
|
||||
* ``use_accelerate_endpoint`` -- Refers to whether to use the S3
|
||||
Accelerate endpoint. The value must be a boolean. If True, the
|
||||
client will use the S3 Accelerate endpoint. If the S3 Accelerate
|
||||
endpoint is being used then the addressing style will always
|
||||
be virtual.
|
||||
|
||||
* ``payload_signing_enabled`` -- Refers to whether or not to SHA256
|
||||
sign sigv4 payloads. By default, this is disabled for streaming
|
||||
uploads (UploadPart and PutObject).
|
||||
|
||||
* ``addressing_style`` -- Refers to the style in which to address
|
||||
s3 endpoints. Values must be a string that equals one of:
|
||||
|
||||
* ``auto`` -- Addressing style is chosen for user. Depending
|
||||
on the configuration of client, the endpoint may be addressed in
|
||||
the virtual or the path style. Note that this is the default
|
||||
behavior if no style is specified.
|
||||
|
||||
* ``virtual`` -- Addressing style is always virtual. The name of the
|
||||
bucket must be DNS compatible or an exception will be thrown.
|
||||
Endpoints will be addressed as such: ``amzn-s3-demo-bucket.s3.amazonaws.com``
|
||||
|
||||
* ``path`` -- Addressing style is always by path. Endpoints will be
|
||||
addressed as such: ``s3.amazonaws.com/amzn-s3-demo-bucket``
|
||||
|
||||
* ``us_east_1_regional_endpoint`` -- Refers to what S3 endpoint to use
|
||||
when the region is configured to be us-east-1. Values must be a
|
||||
string that equals:
|
||||
|
||||
* ``regional`` -- Use the us-east-1.amazonaws.com endpoint if the
|
||||
client is configured to use the us-east-1 region.
|
||||
|
||||
* ``legacy`` -- Use the s3.amazonaws.com endpoint if the client is
|
||||
configured to use the us-east-1 region. This is the default if
|
||||
the configuration option is not specified.
|
||||
|
||||
|
||||
:type retries: dict
|
||||
:param retries: A dictionary for configuration related to retry behavior.
|
||||
Valid keys are:
|
||||
|
||||
* ``total_max_attempts`` -- An integer representing the maximum number of
|
||||
total attempts that will be made on a single request. This includes
|
||||
the initial request, so a value of 1 indicates that no requests
|
||||
will be retried. If ``total_max_attempts`` and ``max_attempts``
|
||||
are both provided, ``total_max_attempts`` takes precedence.
|
||||
``total_max_attempts`` is preferred over ``max_attempts`` because
|
||||
it maps to the ``AWS_MAX_ATTEMPTS`` environment variable and
|
||||
the ``max_attempts`` config file value.
|
||||
* ``max_attempts`` -- An integer representing the maximum number of
|
||||
retry attempts that will be made on a single request. For
|
||||
example, setting this value to 2 will result in the request
|
||||
being retried at most two times after the initial request. Setting
|
||||
this value to 0 will result in no retries ever being attempted after
|
||||
the initial request. If not provided, the number of retries will
|
||||
default to the value specified in the service model, which is
|
||||
typically four retries.
|
||||
* ``mode`` -- A string representing the type of retry mode botocore
|
||||
should use. Valid values are:
|
||||
|
||||
* ``legacy`` - The pre-existing retry behavior.
|
||||
|
||||
* ``standard`` - The standardized set of retry rules. This will also
|
||||
default to 3 max attempts unless overridden.
|
||||
|
||||
* ``adaptive`` - Retries with additional client side throttling.
|
||||
|
||||
:type client_cert: str, (str, str)
|
||||
:param client_cert: The path to a certificate for TLS client authentication.
|
||||
|
||||
When a string is provided it is treated as a path to a client
|
||||
certificate to be used when creating a TLS connection.
|
||||
|
||||
If a client key is to be provided alongside the client certificate the
|
||||
client_cert should be set to a tuple of length two where the first
|
||||
element is the path to the client certificate and the second element is
|
||||
the path to the certificate key.
|
||||
|
||||
:type inject_host_prefix: bool
|
||||
:param inject_host_prefix: Whether host prefix injection should occur.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
The default of None is equivalent to setting to True, which enables
|
||||
the injection of operation parameters into the prefix of the hostname.
|
||||
Setting this to False disables the injection of operation parameters
|
||||
into the prefix of the hostname. Setting this to False is useful for
|
||||
clients providing custom endpoints that should not have their host
|
||||
prefix modified.
|
||||
|
||||
:type use_dualstack_endpoint: bool
|
||||
:param use_dualstack_endpoint: Setting to True enables dualstack
|
||||
endpoint resolution.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type use_fips_endpoint: bool
|
||||
:param use_fips_endpoint: Setting to True enables fips
|
||||
endpoint resolution.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type ignore_configured_endpoint_urls: bool
|
||||
:param ignore_configured_endpoint_urls: Setting to True disables use
|
||||
of endpoint URLs provided via environment variables and
|
||||
the shared configuration file.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type tcp_keepalive: bool
|
||||
:param tcp_keepalive: Enables the TCP Keep-Alive socket option used when
|
||||
creating new connections if set to True.
|
||||
|
||||
Defaults to False.
|
||||
|
||||
:type request_min_compression_size_bytes: int
|
||||
:param request_min_compression_size_bytes: The minimum size in bytes that a
|
||||
request body should be to trigger compression. All requests with
|
||||
streaming input that don't contain the ``requiresLength`` trait will be
|
||||
compressed regardless of this setting.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type disable_request_compression: bool
|
||||
:param disable_request_compression: Disables request body compression if
|
||||
set to True.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type sigv4a_signing_region_set: string
|
||||
:param sigv4a_signing_region_set: A set of AWS regions to apply the signature for
|
||||
when using SigV4a for signing. Set to ``*`` to represent all regions.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type client_context_params: dict
|
||||
:param client_context_params: A dictionary of parameters specific to
|
||||
individual services. If available, valid parameters can be found in
|
||||
the ``Client Context Parameters`` section of the service client's
|
||||
documentation. Invalid parameters or ones that are not used by the
|
||||
specified service will be ignored.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type request_checksum_calculation: str
|
||||
:param request_checksum_calculation: Determines when a checksum will be
|
||||
calculated for request payloads. Valid values are:
|
||||
|
||||
* ``when_supported`` -- When set, a checksum will be calculated for
|
||||
all request payloads of operations modeled with the ``httpChecksum``
|
||||
trait where ``requestChecksumRequired`` is ``true`` or a
|
||||
``requestAlgorithmMember`` is modeled.
|
||||
|
||||
* ``when_required`` -- When set, a checksum will only be calculated
|
||||
for request payloads of operations modeled with the ``httpChecksum``
|
||||
trait where ``requestChecksumRequired`` is ``true`` or where a
|
||||
``requestAlgorithmMember`` is modeled and supplied.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type response_checksum_validation: str
|
||||
:param response_checksum_validation: Determines when checksum validation
|
||||
will be performed on response payloads. Valid values are:
|
||||
|
||||
* ``when_supported`` -- When set, checksum validation is performed on
|
||||
all response payloads of operations modeled with the ``httpChecksum``
|
||||
trait where ``responseAlgorithms`` is modeled, except when no modeled
|
||||
checksum algorithms are supported.
|
||||
|
||||
* ``when_required`` -- When set, checksum validation is not performed
|
||||
on response payloads of operations unless the checksum algorithm is
|
||||
supported and the ``requestValidationModeMember`` member is set to ``ENABLED``.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type account_id_endpoint_mode: str
|
||||
:param account_id_endpoint_mode: The value used to determine the client's
|
||||
behavior for account ID based endpoint routing. Valid values are:
|
||||
|
||||
* ``preferred`` - The endpoint should include account ID if available.
|
||||
* ``disabled`` - A resolved endpoint does not include account ID.
|
||||
* ``required`` - The endpoint must include account ID. If the account ID
|
||||
isn't available, an exception will be raised.
|
||||
|
||||
If a value is not provided, the client will default to ``preferred``.
|
||||
|
||||
Defaults to None.
|
||||
|
||||
:type auth_scheme_preference: str
|
||||
:param auth_scheme_preference: A comma-delimited string of case-sensitive
|
||||
auth scheme names used to determine the client's auth scheme preference.
|
||||
|
||||
Defaults to None.
|
||||
"""
|
||||
|
||||
# Default value for every supported constructor option, in declaration
# order.  The ordering matters: positional arguments passed to
# ``Config(...)`` are mapped onto these keys by position (see
# ``_record_user_provided_options``), so new options must be appended,
# never inserted.
OPTION_DEFAULTS = OrderedDict(
    [
        ('region_name', None),
        ('signature_version', None),
        ('user_agent', None),
        ('user_agent_extra', None),
        ('user_agent_appid', None),
        ('connect_timeout', DEFAULT_TIMEOUT),
        ('read_timeout', DEFAULT_TIMEOUT),
        ('parameter_validation', True),
        ('max_pool_connections', MAX_POOL_CONNECTIONS),
        ('proxies', None),
        ('proxies_config', None),
        ('s3', None),
        ('retries', None),
        ('client_cert', None),
        ('inject_host_prefix', None),
        ('endpoint_discovery_enabled', None),
        ('use_dualstack_endpoint', None),
        ('use_fips_endpoint', None),
        ('ignore_configured_endpoint_urls', None),
        ('defaults_mode', None),
        ('tcp_keepalive', None),
        ('request_min_compression_size_bytes', None),
        ('disable_request_compression', None),
        ('client_context_params', None),
        ('sigv4a_signing_region_set', None),
        ('request_checksum_calculation', None),
        ('response_checksum_validation', None),
        ('account_id_endpoint_mode', None),
        ('auth_scheme_preference', None),
    ]
)

# Overrides layered on top of OPTION_DEFAULTS when ``defaults_mode`` is
# anything other than 'legacy' (applied before user-provided options in
# ``__init__``).
NON_LEGACY_OPTION_DEFAULTS = {
    'connect_timeout': None,
}
|
||||
|
||||
# inject_host_prefix originally defaulted to True, which made it
# impossible to tell "the user passed True" apart from "the user never
# set it" when resolving the value from other locations in the
# parameter provider chain (env vars, shared config file).  To
# disambiguate, the raw attribute holds an "UNSET" sentinel until a
# value is explicitly assigned, while this property preserves the
# public default of True.
@property
def inject_host_prefix(self):
    """Whether host prefix injection is enabled; defaults to True."""
    sentinel_present = self._inject_host_prefix == "UNSET"
    return True if sentinel_present else self._inject_host_prefix

# Any explicit assignment replaces the "UNSET" sentinel, marking the
# option as user-provided.
@inject_host_prefix.setter
def inject_host_prefix(self, value):
    self._inject_host_prefix = value
|
||||
|
||||
def __init__(self, *args, **kwargs):
    """Build a Config from positional and/or keyword options.

    Positional arguments are matched to option names by their position
    in ``OPTION_DEFAULTS``.  Unknown keywords, excess positionals, and
    duplicate positional/keyword values raise ``TypeError``.
    """
    # Record exactly which options the caller supplied; this mapping is
    # also what ``merge`` propagates into new Config objects.
    self._user_provided_options = self._record_user_provided_options(
        args, kwargs
    )

    # By default, we use a value that indicates the user did not
    # set it. This value MUST persist on the Config object to be used
    # elsewhere.
    self._inject_host_prefix = 'UNSET'

    # Merge the user_provided options onto the default options
    config_vars = copy.copy(self.OPTION_DEFAULTS)
    defaults_mode = self._user_provided_options.get(
        'defaults_mode', 'legacy'
    )
    if defaults_mode != 'legacy':
        # Non-legacy defaults modes replace some defaults (e.g.
        # connect_timeout) before user options are layered on top.
        config_vars.update(self.NON_LEGACY_OPTION_DEFAULTS)

    config_vars.update(self._user_provided_options)

    # Set the attributes based on the config_vars
    for key, value in config_vars.items():
        # Default values for the Config object are set here. We don't want
        # to use `setattr` in the case where the user already supplied a
        # value.
        if (
            key == 'inject_host_prefix'
            and 'inject_host_prefix'
            not in self._user_provided_options.keys()
        ):
            # Keep the 'UNSET' sentinel so the property can distinguish
            # "never set by the user" from an explicit value.
            continue
        setattr(self, key, value)

    # Validate the s3 options
    self._validate_s3_configuration(self.s3)

    self._validate_retry_configuration(self.retries)
|
||||
|
||||
def _record_user_provided_options(self, args, kwargs):
|
||||
option_order = list(self.OPTION_DEFAULTS)
|
||||
user_provided_options = {}
|
||||
|
||||
# Iterate through the kwargs passed through to the constructor and
|
||||
# map valid keys to the dictionary
|
||||
for key, value in kwargs.items():
|
||||
if key in self.OPTION_DEFAULTS:
|
||||
user_provided_options[key] = value
|
||||
# The key must exist in the available options
|
||||
else:
|
||||
raise TypeError(f"Got unexpected keyword argument '{key}'")
|
||||
|
||||
# The number of args should not be longer than the allowed
|
||||
# options
|
||||
if len(args) > len(option_order):
|
||||
raise TypeError(
|
||||
f"Takes at most {len(option_order)} arguments ({len(args)} given)"
|
||||
)
|
||||
|
||||
# Iterate through the args passed through to the constructor and map
|
||||
# them to appropriate keys.
|
||||
for i, arg in enumerate(args):
|
||||
# If a kwarg was specified for the arg, then error out
|
||||
if option_order[i] in user_provided_options:
|
||||
raise TypeError(
|
||||
f"Got multiple values for keyword argument '{option_order[i]}'"
|
||||
)
|
||||
user_provided_options[option_order[i]] = arg
|
||||
|
||||
return user_provided_options
|
||||
|
||||
def _validate_s3_configuration(self, s3):
|
||||
if s3 is not None:
|
||||
addressing_style = s3.get('addressing_style')
|
||||
if addressing_style not in ['virtual', 'auto', 'path', None]:
|
||||
raise InvalidS3AddressingStyleError(
|
||||
s3_addressing_style=addressing_style
|
||||
)
|
||||
|
||||
def _validate_retry_configuration(self, retries):
|
||||
valid_options = ('max_attempts', 'mode', 'total_max_attempts')
|
||||
valid_modes = ('legacy', 'standard', 'adaptive')
|
||||
if retries is not None:
|
||||
for key, value in retries.items():
|
||||
if key not in valid_options:
|
||||
raise InvalidRetryConfigurationError(
|
||||
retry_config_option=key,
|
||||
valid_options=valid_options,
|
||||
)
|
||||
if key == 'max_attempts' and value < 0:
|
||||
raise InvalidMaxRetryAttemptsError(
|
||||
provided_max_attempts=value,
|
||||
min_value=0,
|
||||
)
|
||||
if key == 'total_max_attempts' and value < 1:
|
||||
raise InvalidMaxRetryAttemptsError(
|
||||
provided_max_attempts=value,
|
||||
min_value=1,
|
||||
)
|
||||
if key == 'mode' and value not in valid_modes:
|
||||
raise InvalidRetryModeError(
|
||||
provided_retry_mode=value,
|
||||
valid_modes=valid_modes,
|
||||
)
|
||||
|
||||
def merge(self, other_config):
    """Merge this config object with another config object.

    Only user-provided (non-default) values participate in the merge,
    and values from ``other_config`` take precedence.

    :type other_config: botocore.config.Config
    :param other_config: Another config object to merge with.

    :returns: A new config object built from the merged values of
        both config objects.
    """
    # Start from this object's explicitly-set options, then let the
    # other config's explicitly-set options win on any overlap.
    merged_options = dict(self._user_provided_options)
    merged_options.update(other_config._user_provided_options)
    return Config(**merged_options)
|
||||
@@ -0,0 +1,287 @@
|
||||
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
|
||||
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
import configparser
|
||||
import copy
|
||||
import os
|
||||
import shlex
|
||||
import sys
|
||||
|
||||
import botocore.exceptions
|
||||
|
||||
|
||||
def multi_file_load_config(*filenames):
    """Load and combine multiple INI configs with profiles.

    This function takes a list of filenames and returns a single
    dictionary representing the merge of the loaded config files.

    Filenames that do not exist are silently skipped, so it is fine to
    pass paths that may be absent.

    Merging is shallow: only top level keys are combined, and the
    filenames are given in order of precedence (the first file wins
    over the second, the second over the third, and so on).  The one
    exception is the "profiles" key, whose contents are combined so
    that profiles from every file appear in a single mapping; when the
    same profile is defined in several files, the highest-precedence
    file's definition is used whole — profile values themselves are
    never merged.  For example::

        FileA              FileB              FileC
        [foo]             [foo]              [bar]
        a=1               a=2                a=3
        b=2

        [bar]             [baz]              [profile a]
        a=2               a=3                region=e

        [profile a]       [profile b]        [profile c]
        region=c          region=d           region=f

    The final result of ``multi_file_load_config(FileA, FileB, FileC)``
    would be::

        {"foo": {"a": 1, "b": 2}, "bar": {"a": 2}, "baz": {"a": 3},
         "profiles": {"a": {"region": "c"}, "b": {"region": "d"},
                      "c": {"region": "f"}}}

    Note that "foo" comes entirely from FileA even though FileB also
    defines it, and that profile "a" comes from FileA, "b" from FileB,
    and "c" from FileC.
    """
    loaded_configs = []
    loaded_profiles = []
    for filename in filenames:
        try:
            parsed = load_config(filename)
        except botocore.exceptions.ConfigNotFound:
            # Missing files are allowed; just skip them.
            continue
        # Pull the profiles out so they can be merged independently of
        # the remaining top level keys.
        loaded_profiles.append(parsed.pop('profiles'))
        loaded_configs.append(parsed)
    merged = _merge_list_of_dicts(loaded_configs)
    merged['profiles'] = _merge_list_of_dicts(loaded_profiles)
    return merged
|
||||
|
||||
|
||||
def _merge_list_of_dicts(list_of_dicts):
|
||||
merged_dicts = {}
|
||||
for single_dict in list_of_dicts:
|
||||
for key, value in single_dict.items():
|
||||
if key not in merged_dicts:
|
||||
merged_dicts[key] = value
|
||||
return merged_dicts
|
||||
|
||||
|
||||
def load_config(config_filename):
    """Parse an INI config with profiles.

    Parses the file and maps top level ``profile <name>`` sections into
    a single top level "profiles" key.  To map every section name to a
    top level key unchanged, use ``raw_config_parse`` instead.
    """
    return build_profile_map(raw_config_parse(config_filename))
|
||||
|
||||
|
||||
def raw_config_parse(config_filename, parse_subsections=True):
    """Returns the parsed INI config contents.

    Each section name is a top level key.

    :param config_filename: The name of the INI file to parse

    :param parse_subsections: If True, parse indented blocks as
        subsections that represent their own configuration dictionary.
        For example, if the config file had the contents::

            s3 =
               signature_version = s3v4
               addressing_style = path

        The resulting ``raw_config_parse`` would be::

            {'s3': {'signature_version': 's3v4', 'addressing_style': 'path'}}

        If False, do not try to parse subsections and return the indented
        block as its literal value::

            {'s3': '\nsignature_version = s3v4\naddressing_style = path'}

    :returns: A dict with keys for each profile found in the config
        file and the value of each key being a dict containing name
        value pairs found in that profile.

    :raises: ConfigNotFound, ConfigParseError
    """
    config = {}
    path = config_filename
    # NOTE: a None filename silently yields an empty config rather than
    # raising; callers rely on this for "no config file" setups.
    if path is not None:
        # Expand env vars (e.g. $HOME) and ~ before checking existence.
        path = os.path.expandvars(path)
        path = os.path.expanduser(path)
        if not os.path.isfile(path):
            raise botocore.exceptions.ConfigNotFound(path=_unicode_path(path))
        cp = configparser.RawConfigParser()
        try:
            cp.read([path])
        except (configparser.Error, UnicodeDecodeError) as e:
            # Re-raise as a botocore error; 'from None' suppresses the
            # noisy configparser exception context.
            raise botocore.exceptions.ConfigParseError(
                path=_unicode_path(path), error=e
            ) from None
        else:
            for section in cp.sections():
                config[section] = {}
                for option in cp.options(section):
                    config_value = cp.get(section, option)
                    # A value beginning with a newline is an indented
                    # block (e.g. "s3 =\n  key = value").
                    if parse_subsections and config_value.startswith('\n'):
                        # Then we need to parse the inner contents as
                        # hierarchical. We support a single level
                        # of nesting for now.
                        try:
                            config_value = _parse_nested(config_value)
                        except ValueError as e:
                            raise botocore.exceptions.ConfigParseError(
                                path=_unicode_path(path), error=e
                            ) from None
                    config[section][option] = config_value
    return config
|
||||
|
||||
|
||||
def _unicode_path(path):
|
||||
if isinstance(path, str):
|
||||
return path
|
||||
# According to the documentation getfilesystemencoding can return None
|
||||
# on unix in which case the default encoding is used instead.
|
||||
filesystem_encoding = sys.getfilesystemencoding()
|
||||
if filesystem_encoding is None:
|
||||
filesystem_encoding = sys.getdefaultencoding()
|
||||
return path.decode(filesystem_encoding, 'replace')
|
||||
|
||||
|
||||
def _parse_nested(config_value):
|
||||
# Given a value like this:
|
||||
# \n
|
||||
# foo = bar
|
||||
# bar = baz
|
||||
# We need to parse this into
|
||||
# {'foo': 'bar', 'bar': 'baz}
|
||||
parsed = {}
|
||||
for line in config_value.splitlines():
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
# The caller will catch ValueError
|
||||
# and raise an appropriate error
|
||||
# if this fails.
|
||||
key, value = line.split('=', 1)
|
||||
parsed[key.strip()] = value.strip()
|
||||
return parsed
|
||||
|
||||
|
||||
def _parse_section(key, values):
|
||||
result = {}
|
||||
try:
|
||||
parts = shlex.split(key)
|
||||
except ValueError:
|
||||
return result
|
||||
if len(parts) == 2:
|
||||
result[parts[1]] = values
|
||||
return result
|
||||
|
||||
|
||||
def build_profile_map(parsed_ini_config):
    """Convert parsed INI contents into a profile map.

    The config file format prefixes every profile except the default
    with "profile", e.g.::

        [profile test]
        aws_... = foo

        [profile bar]
        aws_... = foo

        # This is *not* a profile
        [preview]
        otherstuff = 1

    Sections named ``profile <name>`` are collected under a single top
    level "profiles" key (keyed by ``<name>``), ``sso-session <name>``
    sections under "sso_sessions", and ``services <name>`` sections
    under "services".  The special ``default`` section is treated as a
    profile even without the "profile" prefix.  Every other section is
    copied through unchanged as its own top level key.

    If no profiles are present, "profiles" maps to an empty dict.

    .. note::

        The passed-in ``parsed_ini_config`` is not mutated; a deep copy
        is transformed and returned.

    """
    source = copy.deepcopy(parsed_ini_config)
    profiles = {}
    sso_sessions = {}
    services = {}
    final_config = {}
    for section, section_values in source.items():
        if section == 'default':
            # 'default' is considered a profile name even though it is
            # not written as 'profile "default"'.
            profiles[section] = section_values
        elif section.startswith('profile'):
            profiles.update(_parse_section(section, section_values))
        elif section.startswith('sso-session'):
            sso_sessions.update(_parse_section(section, section_values))
        elif section.startswith('services'):
            services.update(_parse_section(section, section_values))
        else:
            final_config[section] = section_values
    final_config['profiles'] = profiles
    final_config['sso_sessions'] = sso_sessions
    final_config['services'] = services
    return final_config
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,127 @@
|
||||
# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
"""
|
||||
NOTE: All classes and functions in this module are considered private and are
|
||||
subject to abrupt breaking changes. Please do not use them directly.
|
||||
"""
|
||||
|
||||
from contextlib import contextmanager
|
||||
from contextvars import ContextVar
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass, field
|
||||
from functools import wraps
|
||||
|
||||
|
||||
@dataclass
class ClientContext:
    """
    Container for the state tracked via the ``_context`` context variable.

    ``features`` collects feature ids recorded while an AWS request is
    prepared; ``botocore.useragent.register_feature_id`` adds entries
    to this set.
    """

    # default_factory gives every context its own set instance.
    features: set[str] = field(default_factory=set)
|
||||
|
||||
|
||||
# Module-level storage for the active ClientContext.  A ContextVar is
# used so that each thread / async task observes its own value.
_context = ContextVar("_context")


def get_context():
    """Return the current client context, or None when none is set."""
    return _context.get(None)


def set_context(ctx):
    """Install *ctx* as the current client context.

    :type ctx: ClientContext
    :param ctx: Client context object to set as the current context
        variable.

    :rtype: contextvars.Token
    :returns: Token that can later be passed to ``reset_context`` to
        restore the value in place before this call.
    """
    return _context.set(ctx)


def reset_context(token):
    """Revert the context variable to its pre-``set_context`` state.

    :type token: contextvars.Token
    :param token: Token returned by the corresponding ``set_context``.
    """
    _context.reset(token)
|
||||
|
||||
|
||||
@contextmanager
def start_as_current_context(ctx=None):
    """
    Temporarily install a copy of a client context.

    A deep copy of the passed (or the currently active) context object
    is set as the current context variable; when neither exists, a
    fresh ``ClientContext`` is created instead.  The previous value of
    the context variable is always restored when the managed block
    exits.

    Example usage:

        def my_feature():
            with start_as_current_context():
                register_feature_id('MY_FEATURE')
                pass

    :type ctx: ClientContext
    :param ctx: The client context object to set as the new context
        variable.  If not provided, the current or a new context
        variable is used.
    """
    base = ctx or get_context()
    replacement = ClientContext() if base is None else deepcopy(base)
    token = set_context(replacement)
    try:
        yield
    finally:
        # Restore whatever was active before entry, even on error.
        reset_context(token)
|
||||
|
||||
|
||||
def with_current_context(hook=None):
    """
    Decorator form of ``start_as_current_context``.

    The decorated function always runs inside a newly-set client
    context; *hook*, when supplied, is invoked within that context just
    before the function body.  This is purely syntactic sugar that
    avoids indenting existing code under the context manager.

    Example usage:

        @with_current_context(partial(register_feature_id, 'MY_FEATURE'))
        def my_feature():
            pass

    :type hook: callable
    :param hook: A callable that will be invoked within the scope of
        the ``start_as_current_context`` context manager.
    """

    def decorator(wrapped):
        @wraps(wrapped)
        def wrapper(*args, **kwargs):
            with start_as_current_context():
                if hook:
                    hook()
                return wrapped(*args, **kwargs)

        return wrapper

    return decorator
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,27 @@
|
||||
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
# A list of auth types supported by the signers in botocore/crt/auth.py. This
# should always match the keys of botocore.crt.auth.CRT_AUTH_TYPE_MAPS. The
# information is duplicated here so that it can be accessed in environments
# where `awscrt` is not present and any import from botocore.crt.auth would
# fail.
CRT_SUPPORTED_AUTH_TYPES = (
    'v4',
    'v4-query',
    'v4a',
    's3v4',
    's3v4-query',
    's3v4a',
    's3v4a-query',
)
|
||||
@@ -0,0 +1,631 @@
|
||||
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# http://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
import datetime
|
||||
from io import BytesIO
|
||||
|
||||
from botocore.auth import (
|
||||
SIGNED_HEADERS_BLACKLIST,
|
||||
STREAMING_UNSIGNED_PAYLOAD_TRAILER,
|
||||
UNSIGNED_PAYLOAD,
|
||||
BaseSigner,
|
||||
_get_body_as_dict,
|
||||
_host_from_url,
|
||||
)
|
||||
from botocore.compat import HTTPHeaders, awscrt, parse_qs, urlsplit, urlunsplit
|
||||
from botocore.exceptions import NoCredentialsError
|
||||
from botocore.useragent import register_feature_id
|
||||
from botocore.utils import percent_encode_sequence
|
||||
|
||||
|
||||
class CrtSigV4Auth(BaseSigner):
    """SigV4 request signer backed by the AWS CRT (``awscrt``) bindings.

    Signs requests via HTTP headers using
    ``awscrt.auth.aws_sign_request``; subclasses override the
    class-level knobs below for S3 and query-string variants.
    """

    # SigV4 needs a region for the credential scope.
    REQUIRES_REGION = True
    # Headers stripped before (re-)signing; the request may be a retry
    # that was signed previously.
    _PRESIGNED_HEADERS_BLOCKLIST = [
        'Authorization',
        'X-Amz-Date',
        'X-Amz-Content-SHA256',
        'X-Amz-Security-Token',
    ]
    # Sign via request headers (query-string subclasses override this).
    _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_HEADERS
    _USE_DOUBLE_URI_ENCODE = True
    _SHOULD_NORMALIZE_URI_PATH = True

    def __init__(self, credentials, service_name, region_name):
        self.credentials = credentials
        self._service_name = service_name
        self._region_name = region_name
        # None means no expiration; presigned-URL subclasses set this.
        self._expiration_in_seconds = None

    def _is_streaming_checksum_payload(self, request):
        """Return True if the request sends its checksum as a trailer."""
        checksum_context = request.context.get('checksum', {})
        algorithm = checksum_context.get('request_algorithm')
        return isinstance(algorithm, dict) and algorithm.get('in') == 'trailer'

    def add_auth(self, request):
        """Sign *request* in place using the CRT signer.

        :raises NoCredentialsError: if no credentials are available.
        """
        if self.credentials is None:
            raise NoCredentialsError()

        # Use utcnow() because that's what gets mocked by tests, but set
        # timezone because CRT assumes naive datetime is local time.
        datetime_now = datetime.datetime.utcnow().replace(
            tzinfo=datetime.timezone.utc
        )

        # Use existing 'X-Amz-Content-SHA256' header if able
        existing_sha256 = self._get_existing_sha256(request)

        self._modify_request_before_signing(request)

        credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static(
            access_key_id=self.credentials.access_key,
            secret_access_key=self.credentials.secret_key,
            session_token=self.credentials.token,
        )

        # Choose the payload value to sign: the streaming-trailer
        # sentinel, a pre-existing SHA256, one computed during signing
        # (None), or the UNSIGNED-PAYLOAD sentinel.
        if self._is_streaming_checksum_payload(request):
            explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER
        elif self._should_sha256_sign_payload(request):
            if existing_sha256:
                explicit_payload = existing_sha256
            else:
                explicit_payload = None  # to be calculated during signing
        else:
            explicit_payload = UNSIGNED_PAYLOAD

        if self._should_add_content_sha256_header(explicit_payload):
            body_header = (
                awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256
            )
        else:
            body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE

        signing_config = awscrt.auth.AwsSigningConfig(
            algorithm=awscrt.auth.AwsSigningAlgorithm.V4,
            signature_type=self._SIGNATURE_TYPE,
            credentials_provider=credentials_provider,
            region=self._region_name,
            service=self._service_name,
            date=datetime_now,
            should_sign_header=self._should_sign_header,
            use_double_uri_encode=self._USE_DOUBLE_URI_ENCODE,
            should_normalize_uri_path=self._SHOULD_NORMALIZE_URI_PATH,
            signed_body_value=explicit_payload,
            signed_body_header_type=body_header,
            expiration_in_seconds=self._expiration_in_seconds,
        )
        crt_request = self._crt_request_from_aws_request(request)
        future = awscrt.auth.aws_sign_request(crt_request, signing_config)
        # aws_sign_request is asynchronous; block until signing is done.
        future.result()
        self._apply_signing_changes(request, crt_request)

    def _crt_request_from_aws_request(self, aws_request):
        """Translate an AWSRequest into an ``awscrt.http.HttpRequest``."""
        url_parts = urlsplit(aws_request.url)
        crt_path = url_parts.path if url_parts.path else '/'
        # Explicit params take precedence over any query string already
        # embedded in the URL.
        if aws_request.params:
            array = []
            for param, value in aws_request.params.items():
                value = str(value)
                array.append(f'{param}={value}')
            crt_path = crt_path + '?' + '&'.join(array)
        elif url_parts.query:
            crt_path = f'{crt_path}?{url_parts.query}'

        crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items())

        # CRT requires body (if it exists) to be an I/O stream.
        crt_body_stream = None
        if aws_request.body:
            if hasattr(aws_request.body, 'seek'):
                crt_body_stream = aws_request.body
            else:
                crt_body_stream = BytesIO(aws_request.body)

        crt_request = awscrt.http.HttpRequest(
            method=aws_request.method,
            path=crt_path,
            headers=crt_headers,
            body_stream=crt_body_stream,
        )
        return crt_request

    def _apply_signing_changes(self, aws_request, signed_crt_request):
        # Apply changes from signed CRT request to the AWSRequest
        aws_request.headers = HTTPHeaders.from_pairs(
            list(signed_crt_request.headers)
        )

    def _should_sign_header(self, name, **kwargs):
        """Exclude blacklisted (e.g. hop-by-hop) headers from signing."""
        return name.lower() not in SIGNED_HEADERS_BLACKLIST

    def _modify_request_before_signing(self, request):
        # This could be a retry. Make sure the previous
        # authorization headers are removed first.
        for h in self._PRESIGNED_HEADERS_BLOCKLIST:
            if h in request.headers:
                del request.headers[h]
        # If necessary, add the host header
        if 'host' not in request.headers:
            request.headers['host'] = _host_from_url(request.url)

    def _get_existing_sha256(self, request):
        """Return a pre-computed payload SHA256 header value, if any."""
        return request.headers.get('X-Amz-Content-SHA256')

    def _should_sha256_sign_payload(self, request):
        # Payloads will always be signed over insecure connections.
        if not request.url.startswith('https'):
            return True

        # Certain operations may have payload signing disabled by default.
        # Since we don't have access to the operation model, we pass in this
        # bit of metadata through the request context.
        return request.context.get('payload_signing_enabled', True)

    def _should_add_content_sha256_header(self, explicit_payload):
        # only add X-Amz-Content-SHA256 header if payload is explicitly set
        return explicit_payload is not None
|
||||
|
||||
|
||||
class CrtS3SigV4Auth(CrtSigV4Auth):
    """S3 variant of the CRT SigV4 signer.

    S3 signing differs from generic SigV4: the URI path is neither
    double-encoded nor normalized, the payload SHA256 is always
    recomputed, and body signing may be skipped for streaming uploads
    over TLS when a checksum header is present.
    """

    # For S3, we do not normalize the path.
    _USE_DOUBLE_URI_ENCODE = False
    _SHOULD_NORMALIZE_URI_PATH = False

    def _get_existing_sha256(self, request):
        # Never reuse an existing X-Amz-Content-SHA256 header for S3;
        # returning None forces recalculation during signing.
        return None

    def _should_sha256_sign_payload(self, request):
        # S3 allows optional body signing, so to minimize the performance
        # impact, we opt to not SHA256 sign the body on streaming uploads,
        # provided that we're on https.
        client_config = request.context.get('client_config')
        s3_config = getattr(client_config, 's3', None)

        # The config could be None if it isn't set, or if the customer sets it
        # to None.
        if s3_config is None:
            s3_config = {}

        # The explicit configuration takes precedence over any implicit
        # configuration.
        sign_payload = s3_config.get('payload_signing_enabled', None)
        if sign_payload is not None:
            return sign_payload

        # We require that both a checksum be present and https be enabled
        # to implicitly disable body signing. The combination of TLS and
        # a checksum is sufficiently secure and durable for us to be
        # confident in the request without body signing.
        checksum_header = 'Content-MD5'
        checksum_context = request.context.get('checksum', {})
        algorithm = checksum_context.get('request_algorithm')
        if isinstance(algorithm, dict) and algorithm.get('in') == 'header':
            checksum_header = algorithm['name']
        if (
            not request.url.startswith('https')
            or checksum_header not in request.headers
        ):
            return True

        # If the input is streaming we disable body signing by default.
        if request.context.get('has_streaming_input', False):
            return False

        # If the S3-specific checks had no results, delegate to the generic
        # checks.
        return super()._should_sha256_sign_payload(request)

    def _should_add_content_sha256_header(self, explicit_payload):
        # Always add X-Amz-Content-SHA256 header
        return True
|
||||
|
||||
|
||||
class CrtSigV4AsymAuth(BaseSigner):
    """SigV4A (asymmetric) request signer backed by the AWS CRT."""

    REQUIRES_REGION = True
    # Headers left over from a previous signing pass that must be removed
    # before re-signing (e.g. on retries).
    _PRESIGNED_HEADERS_BLOCKLIST = [
        'Authorization',
        'X-Amz-Date',
        'X-Amz-Content-SHA256',
        'X-Amz-Security-Token',
    ]
    _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_HEADERS
    _USE_DOUBLE_URI_ENCODE = True
    _SHOULD_NORMALIZE_URI_PATH = True

    def __init__(self, credentials, service_name, region_name):
        self.credentials = credentials
        self._service_name = service_name
        self._region_name = region_name
        # None means no expiration (header-based signing); query-parameter
        # subclasses set this to a lifetime in seconds.
        self._expiration_in_seconds = None

    def add_auth(self, request):
        """Sign ``request`` in place with SigV4A headers.

        :raises NoCredentialsError: if no credentials are configured.
        """
        register_feature_id("SIGV4A_SIGNING")
        if self.credentials is None:
            raise NoCredentialsError()

        # Use utcnow() because that's what gets mocked by tests, but set
        # timezone because CRT assumes naive datetime is local time.
        datetime_now = datetime.datetime.utcnow().replace(
            tzinfo=datetime.timezone.utc
        )

        # Use existing 'X-Amz-Content-SHA256' header if able
        existing_sha256 = self._get_existing_sha256(request)

        self._modify_request_before_signing(request)

        credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static(
            access_key_id=self.credentials.access_key,
            secret_access_key=self.credentials.secret_key,
            session_token=self.credentials.token,
        )

        # Pick the signed-body value: streaming-with-trailer-checksum
        # requests use the special trailer constant; otherwise sign the
        # payload (reusing a caller-supplied hash when present) or mark
        # it unsigned.
        if self._is_streaming_checksum_payload(request):
            explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER
        elif self._should_sha256_sign_payload(request):
            if existing_sha256:
                explicit_payload = existing_sha256
            else:
                explicit_payload = None  # to be calculated during signing
        else:
            explicit_payload = UNSIGNED_PAYLOAD

        if self._should_add_content_sha256_header(explicit_payload):
            body_header = (
                awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256
            )
        else:
            body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE

        signing_config = awscrt.auth.AwsSigningConfig(
            algorithm=awscrt.auth.AwsSigningAlgorithm.V4_ASYMMETRIC,
            signature_type=self._SIGNATURE_TYPE,
            credentials_provider=credentials_provider,
            region=self._region_name,
            service=self._service_name,
            date=datetime_now,
            should_sign_header=self._should_sign_header,
            use_double_uri_encode=self._USE_DOUBLE_URI_ENCODE,
            should_normalize_uri_path=self._SHOULD_NORMALIZE_URI_PATH,
            signed_body_value=explicit_payload,
            signed_body_header_type=body_header,
            expiration_in_seconds=self._expiration_in_seconds,
        )
        crt_request = self._crt_request_from_aws_request(request)
        future = awscrt.auth.aws_sign_request(crt_request, signing_config)
        # Signing is asynchronous in CRT; block until it completes (this
        # also re-raises any signing error).
        future.result()
        self._apply_signing_changes(request, crt_request)

    def _crt_request_from_aws_request(self, aws_request):
        """Translate an AWSRequest into an ``awscrt.http.HttpRequest``."""
        url_parts = urlsplit(aws_request.url)
        crt_path = url_parts.path if url_parts.path else '/'
        # Explicit request params take precedence over any query string
        # already embedded in the URL.
        if aws_request.params:
            array = []
            for param, value in aws_request.params.items():
                value = str(value)
                array.append(f'{param}={value}')
            crt_path = crt_path + '?' + '&'.join(array)
        elif url_parts.query:
            crt_path = f'{crt_path}?{url_parts.query}'

        crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items())

        # CRT requires body (if it exists) to be an I/O stream.
        crt_body_stream = None
        if aws_request.body:
            if hasattr(aws_request.body, 'seek'):
                crt_body_stream = aws_request.body
            else:
                crt_body_stream = BytesIO(aws_request.body)

        crt_request = awscrt.http.HttpRequest(
            method=aws_request.method,
            path=crt_path,
            headers=crt_headers,
            body_stream=crt_body_stream,
        )
        return crt_request

    def _apply_signing_changes(self, aws_request, signed_crt_request):
        # Apply changes from signed CRT request to the AWSRequest
        aws_request.headers = HTTPHeaders.from_pairs(
            list(signed_crt_request.headers)
        )

    def _should_sign_header(self, name, **kwargs):
        # Sign every header except those on the module-level blacklist.
        return name.lower() not in SIGNED_HEADERS_BLACKLIST

    def _modify_request_before_signing(self, request):
        # This could be a retry. Make sure the previous
        # authorization headers are removed first.
        for h in self._PRESIGNED_HEADERS_BLOCKLIST:
            if h in request.headers:
                del request.headers[h]
        # If necessary, add the host header
        if 'host' not in request.headers:
            request.headers['host'] = _host_from_url(request.url)

    def _get_existing_sha256(self, request):
        # Return a caller-supplied payload hash header, if any.
        return request.headers.get('X-Amz-Content-SHA256')

    def _is_streaming_checksum_payload(self, request):
        # True when the request's checksum is delivered as a trailer,
        # which requires the streaming-unsigned-payload body value.
        checksum_context = request.context.get('checksum', {})
        algorithm = checksum_context.get('request_algorithm')
        return isinstance(algorithm, dict) and algorithm.get('in') == 'trailer'

    def _should_sha256_sign_payload(self, request):
        # Payloads will always be signed over insecure connections.
        if not request.url.startswith('https'):
            return True

        # Certain operations may have payload signing disabled by default.
        # Since we don't have access to the operation model, we pass in this
        # bit of metadata through the request context.
        return request.context.get('payload_signing_enabled', True)

    def _should_add_content_sha256_header(self, explicit_payload):
        # only add X-Amz-Content-SHA256 header if payload is explicitly set
        return explicit_payload is not None
|
||||
|
||||
|
||||
class CrtS3SigV4AsymAuth(CrtSigV4AsymAuth):
    """SigV4A (asymmetric) signer for S3, backed by the AWS CRT.

    Mirrors :class:`CrtS3SigV4Auth`: S3 paths are signed verbatim, and
    payload signing may be skipped when TLS plus a request checksum
    already protect the body.
    """

    # For S3, we do not normalize the path.
    _USE_DOUBLE_URI_ENCODE = False
    _SHOULD_NORMALIZE_URI_PATH = False

    def _get_existing_sha256(self, request):
        # always recalculate
        return None

    def _should_sha256_sign_payload(self, request):
        # S3 allows optional body signing, so to minimize the performance
        # impact, we opt to not SHA256 sign the body on streaming uploads,
        # provided that we're on https.
        client_config = request.context.get('client_config')
        s3_config = getattr(client_config, 's3', None)

        # The config could be None if it isn't set, or if the customer sets it
        # to None.
        if s3_config is None:
            s3_config = {}

        # The explicit configuration takes precedence over any implicit
        # configuration.
        sign_payload = s3_config.get('payload_signing_enabled', None)
        if sign_payload is not None:
            return sign_payload

        # We require that both a checksum be present and https be enabled
        # to implicitly disable body signing. The combination of TLS and
        # a checksum is sufficiently secure and durable for us to be
        # confident in the request without body signing.
        # Consistency fix: like CrtS3SigV4Auth, honor a flexible-checksum
        # header named in the request context instead of only Content-MD5.
        checksum_header = 'Content-MD5'
        checksum_context = request.context.get('checksum', {})
        algorithm = checksum_context.get('request_algorithm')
        if isinstance(algorithm, dict) and algorithm.get('in') == 'header':
            checksum_header = algorithm['name']
        if (
            not request.url.startswith('https')
            or checksum_header not in request.headers
        ):
            return True

        # If the input is streaming we disable body signing by default.
        if request.context.get('has_streaming_input', False):
            return False

        # If the S3-specific checks had no results, delegate to the generic
        # checks.
        return super()._should_sha256_sign_payload(request)

    def _should_add_content_sha256_header(self, explicit_payload):
        # Always add X-Amz-Content-SHA256 header
        return True
|
||||
|
||||
|
||||
class CrtSigV4AsymQueryAuth(CrtSigV4AsymAuth):
    """SigV4A signer that places auth info in query parameters.

    Produces "presigned URLs": the signature is appended to the query
    string rather than sent as headers.
    """

    # Default lifetime (seconds) of a presigned URL.
    DEFAULT_EXPIRES = 3600
    _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS

    def __init__(
        self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES
    ):
        super().__init__(credentials, service_name, region_name)
        self._expiration_in_seconds = expires

    def _modify_request_before_signing(self, request):
        """Fold operation params and body into the query string pre-signing."""
        super()._modify_request_before_signing(request)

        # We automatically set this header, so if it's the auto-set value we
        # want to get rid of it since it doesn't make sense for presigned urls.
        content_type = request.headers.get('content-type')
        if content_type == 'application/x-www-form-urlencoded; charset=utf-8':
            del request.headers['content-type']

        # Now parse the original query string to a dict, inject our new query
        # params, and serialize back to a query string.
        url_parts = urlsplit(request.url)
        # parse_qs makes each value a list, but in our case we know we won't
        # have repeated keys so we know we have single element lists which we
        # can convert back to scalar values.
        query_string_parts = parse_qs(url_parts.query, keep_blank_values=True)
        query_dict = {k: v[0] for k, v in query_string_parts.items()}

        # Consistency fix: like CrtSigV4QueryAuth, fold any operation params
        # attached to the request into the query string before signing so
        # they are covered by the signature.
        if request.params:
            query_dict.update(request.params)
            request.params = {}

        # The spec is particular about this. It *has* to be:
        # https://<endpoint>?<operation params>&<auth params>
        # You can't mix the two types of params together, i.e just keep doing
        # new_query_params.update(op_params)
        # new_query_params.update(auth_params)
        # percent_encode_sequence(new_query_params)
        if request.data:
            # We also need to move the body params into the query string. To
            # do this, we first have to convert it to a dict.
            query_dict.update(_get_body_as_dict(request))
            request.data = ''
        new_query_string = percent_encode_sequence(query_dict)
        # url_parts is a tuple (and therefore immutable) so we need to create
        # a new url_parts with the new query string.
        # <part> - <index>
        # scheme - 0
        # netloc - 1
        # path - 2
        # query - 3 <-- we're replacing this.
        # fragment - 4
        p = url_parts
        new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
        request.url = urlunsplit(new_url_parts)

    def _apply_signing_changes(self, aws_request, signed_crt_request):
        # Apply changes from signed CRT request to the AWSRequest
        super()._apply_signing_changes(aws_request, signed_crt_request)

        signed_query = urlsplit(signed_crt_request.path).query
        p = urlsplit(aws_request.url)
        # urlsplit() returns a tuple (and therefore immutable) so we
        # need to create new url with the new query string.
        # <part> - <index>
        # scheme - 0
        # netloc - 1
        # path - 2
        # query - 3 <-- we're replacing this.
        # fragment - 4
        aws_request.url = urlunsplit((p[0], p[1], p[2], signed_query, p[4]))
|
||||
|
||||
|
||||
class CrtS3SigV4AsymQueryAuth(CrtSigV4AsymQueryAuth):
    """Presigned-URL signer for S3 using signature version 4A.

    Auth information is carried in query parameters rather than headers.
    """

    # S3 signs the path exactly as given: no normalization, no double
    # URI encoding.
    _USE_DOUBLE_URI_ENCODE = False
    _SHOULD_NORMALIZE_URI_PATH = False

    def _should_sha256_sign_payload(self, request):
        # A presigned URL is created before the payload is known, so the
        # canonical request uses the constant "UNSIGNED-PAYLOAD" rather
        # than a real hash.
        return False

    def _should_add_content_sha256_header(self, explicit_payload):
        # Presigned URLs never carry an X-Amz-Content-SHA256 header.
        return False
|
||||
|
||||
|
||||
class CrtSigV4QueryAuth(CrtSigV4Auth):
    """SigV4 signer that places auth info in query parameters.

    Produces "presigned URLs": the signature is carried in the query
    string rather than in request headers.
    """

    # Default lifetime (seconds) of a presigned URL.
    DEFAULT_EXPIRES = 3600
    _SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS

    def __init__(
        self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES
    ):
        super().__init__(credentials, service_name, region_name)
        self._expiration_in_seconds = expires

    def _modify_request_before_signing(self, request):
        """Fold operation params and body into the query string pre-signing."""
        super()._modify_request_before_signing(request)

        # We automatically set this header, so if it's the auto-set value we
        # want to get rid of it since it doesn't make sense for presigned urls.
        content_type = request.headers.get('content-type')
        if content_type == 'application/x-www-form-urlencoded; charset=utf-8':
            del request.headers['content-type']

        # Now parse the original query string to a dict, inject our new query
        # params, and serialize back to a query string.
        url_parts = urlsplit(request.url)
        # parse_qs makes each value a list, but in our case we know we won't
        # have repeated keys so we know we have single element lists which we
        # can convert back to scalar values.
        query_dict = {
            k: v[0]
            for k, v in parse_qs(
                url_parts.query, keep_blank_values=True
            ).items()
        }
        if request.params:
            query_dict.update(request.params)
            request.params = {}
        # The spec is particular about this. It *has* to be:
        # https://<endpoint>?<operation params>&<auth params>
        # You can't mix the two types of params together, i.e just keep doing
        # new_query_params.update(op_params)
        # new_query_params.update(auth_params)
        # percent_encode_sequence(new_query_params)
        if request.data:
            # We also need to move the body params into the query string. To
            # do this, we first have to convert it to a dict.
            query_dict.update(_get_body_as_dict(request))
            request.data = ''
        new_query_string = percent_encode_sequence(query_dict)
        # url_parts is a tuple (and therefore immutable) so we need to create
        # a new url_parts with the new query string.
        # <part> - <index>
        # scheme - 0
        # netloc - 1
        # path - 2
        # query - 3 <-- we're replacing this.
        # fragment - 4
        p = url_parts
        new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
        request.url = urlunsplit(new_url_parts)

    def _apply_signing_changes(self, aws_request, signed_crt_request):
        # Apply changes from signed CRT request to the AWSRequest
        super()._apply_signing_changes(aws_request, signed_crt_request)

        signed_query = urlsplit(signed_crt_request.path).query
        p = urlsplit(aws_request.url)
        # urlsplit() returns a tuple (and therefore immutable) so we
        # need to create new url with the new query string.
        # <part> - <index>
        # scheme - 0
        # netloc - 1
        # path - 2
        # query - 3 <-- we're replacing this.
        # fragment - 4
        aws_request.url = urlunsplit((p[0], p[1], p[2], signed_query, p[4]))
|
||||
|
||||
|
||||
class CrtS3SigV4QueryAuth(CrtSigV4QueryAuth):
    """Presigned-URL signer for S3 using signature version 4.

    Auth information is carried in query parameters rather than headers.

    Based off of:
    http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
    """

    # S3 signs the path exactly as given: no normalization, no double
    # URI encoding.
    _USE_DOUBLE_URI_ENCODE = False
    _SHOULD_NORMALIZE_URI_PATH = False

    def _should_sha256_sign_payload(self, request):
        # A presigned URL is created before the payload is known, so the
        # canonical request uses the constant "UNSIGNED-PAYLOAD" rather
        # than a real hash (per the doc link above).
        return False

    def _should_add_content_sha256_header(self, explicit_payload):
        # Presigned URLs never carry an X-Amz-Content-SHA256 header.
        return False
|
||||
|
||||
|
||||
# Maps botocore auth-type names to their CRT-backed signer classes.
# Defined at the bottom of module to ensure all Auth
# classes are defined.
CRT_AUTH_TYPE_MAPS = {
    'v4': CrtSigV4Auth,
    'v4-query': CrtSigV4QueryAuth,
    'v4a': CrtSigV4AsymAuth,
    's3v4': CrtS3SigV4Auth,
    's3v4-query': CrtS3SigV4QueryAuth,
    's3v4a': CrtS3SigV4AsymAuth,
    's3v4a-query': CrtS3SigV4AsymQueryAuth,
}
|
||||
@@ -0,0 +1,292 @@
|
||||
{
|
||||
"definitions": {
|
||||
"throttling": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "Throttling",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
},
|
||||
"throttling_exception": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "ThrottlingException",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
},
|
||||
"throttled_exception": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "ThrottledException",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
},
|
||||
"request_throttled_exception": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "RequestThrottledException",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
},
|
||||
"too_many_requests": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 429
|
||||
}
|
||||
}
|
||||
},
|
||||
"general_socket_errors": {
|
||||
"applies_when": {
|
||||
"socket_errors": ["GENERAL_CONNECTION_ERROR"]
|
||||
}
|
||||
},
|
||||
"general_server_error": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 500
|
||||
}
|
||||
}
|
||||
},
|
||||
"bad_gateway": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 502
|
||||
}
|
||||
}
|
||||
},
|
||||
"service_unavailable": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 503
|
||||
}
|
||||
}
|
||||
},
|
||||
"gateway_timeout": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 504
|
||||
}
|
||||
}
|
||||
},
|
||||
"limit_exceeded": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 509
|
||||
}
|
||||
}
|
||||
},
|
||||
"throughput_exceeded": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "ProvisionedThroughputExceededException",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"retry": {
|
||||
"__default__": {
|
||||
"max_attempts": 5,
|
||||
"delay": {
|
||||
"type": "exponential",
|
||||
"base": "rand",
|
||||
"growth_factor": 2
|
||||
},
|
||||
"policies": {
|
||||
"general_socket_errors": {"$ref": "general_socket_errors"},
|
||||
"general_server_error": {"$ref": "general_server_error"},
|
||||
"bad_gateway": {"$ref": "bad_gateway"},
|
||||
"service_unavailable": {"$ref": "service_unavailable"},
|
||||
"gateway_timeout": {"$ref": "gateway_timeout"},
|
||||
"limit_exceeded": {"$ref": "limit_exceeded"},
|
||||
"throttling_exception": {"$ref": "throttling_exception"},
|
||||
"throttled_exception": {"$ref": "throttled_exception"},
|
||||
"request_throttled_exception": {"$ref": "request_throttled_exception"},
|
||||
"throttling": {"$ref": "throttling"},
|
||||
"too_many_requests": {"$ref": "too_many_requests"},
|
||||
"throughput_exceeded": {"$ref": "throughput_exceeded"}
|
||||
}
|
||||
},
|
||||
"organizations": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"too_many_requests": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "TooManyRequestsException",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"dynamodb": {
|
||||
"__default__": {
|
||||
"max_attempts": 10,
|
||||
"delay": {
|
||||
"type": "exponential",
|
||||
"base": 0.05,
|
||||
"growth_factor": 2
|
||||
},
|
||||
"policies": {
|
||||
"still_processing": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "TransactionInProgressException",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
},
|
||||
"crc32": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"crc32body": "x-amz-crc32"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ec2": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"request_limit_exceeded": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "RequestLimitExceeded",
|
||||
"http_status_code": 503
|
||||
}
|
||||
}
|
||||
},
|
||||
"ec2_throttled_exception": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "EC2ThrottledException",
|
||||
"http_status_code": 503
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"cloudsearch": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"request_limit_exceeded": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "BandwidthLimitExceeded",
|
||||
"http_status_code": 509
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"kinesis": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"request_limit_exceeded": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "LimitExceededException",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sqs": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"request_limit_exceeded": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "RequestThrottled",
|
||||
"http_status_code": 403
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"s3": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"timeouts": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 400,
|
||||
"service_error_code": "RequestTimeout"
|
||||
}
|
||||
}
|
||||
},
|
||||
"contentmd5": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 400,
|
||||
"service_error_code": "BadDigest"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"glacier": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"timeouts": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"http_status_code": 408,
|
||||
"service_error_code": "RequestTimeoutException"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"route53": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"request_limit_exceeded": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "Throttling",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
},
|
||||
"still_processing": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "PriorRequestNotComplete",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sts": {
|
||||
"__default__": {
|
||||
"policies": {
|
||||
"idp_unreachable_error": {
|
||||
"applies_when": {
|
||||
"response": {
|
||||
"service_error_code": "IDPCommunicationError",
|
||||
"http_status_code": 400
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+70
@@ -0,0 +1,70 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListAnalyzedResources": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "analyzedResources"
|
||||
},
|
||||
"ListAnalyzers": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "analyzers"
|
||||
},
|
||||
"ListArchiveRules": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "archiveRules"
|
||||
},
|
||||
"ListFindings": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "findings"
|
||||
},
|
||||
"ListAccessPreviewFindings": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "findings"
|
||||
},
|
||||
"ListAccessPreviews": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "accessPreviews"
|
||||
},
|
||||
"ValidatePolicy": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "findings"
|
||||
},
|
||||
"ListPolicyGenerations": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "policyGenerations"
|
||||
},
|
||||
"GetFindingV2": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "findingDetails"
|
||||
},
|
||||
"ListFindingsV2": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "findings"
|
||||
},
|
||||
"GetFindingRecommendation": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "recommendedSteps"
|
||||
}
|
||||
}
|
||||
}
|
||||
+31
@@ -0,0 +1,31 @@
|
||||
{
|
||||
"version": 1.0,
|
||||
"merge": {
|
||||
"pagination": {
|
||||
"GetFindingV2": {
|
||||
"non_aggregate_keys": [
|
||||
"resource",
|
||||
"status",
|
||||
"error",
|
||||
"createdAt",
|
||||
"resourceType",
|
||||
"findingType",
|
||||
"resourceOwnerAccount",
|
||||
"analyzedAt",
|
||||
"id",
|
||||
"updatedAt"
|
||||
]
|
||||
},
|
||||
"GetFindingRecommendation": {
|
||||
"non_aggregate_keys": [
|
||||
"status",
|
||||
"error",
|
||||
"completedAt",
|
||||
"recommendationType",
|
||||
"resourceArn",
|
||||
"startedAt"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
LFS
BIN
Binary file not shown.
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+10
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListRegions": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Regions"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+22
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListCertificateAuthorities": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "CertificateAuthorities"
|
||||
},
|
||||
"ListTags": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Tags"
|
||||
},
|
||||
"ListPermissions": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Permissions"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+64
@@ -0,0 +1,64 @@
|
||||
{
|
||||
"version" : 2,
|
||||
"waiters" : {
|
||||
"AuditReportCreated" : {
|
||||
"description" : "Wait until a Audit Report is created",
|
||||
"delay" : 3,
|
||||
"maxAttempts" : 60,
|
||||
"operation" : "DescribeCertificateAuthorityAuditReport",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "path",
|
||||
"argument" : "AuditReportStatus",
|
||||
"state" : "success",
|
||||
"expected" : "SUCCESS"
|
||||
}, {
|
||||
"matcher" : "path",
|
||||
"argument" : "AuditReportStatus",
|
||||
"state" : "failure",
|
||||
"expected" : "FAILED"
|
||||
}, {
|
||||
"matcher" : "error",
|
||||
"state" : "failure",
|
||||
"expected" : "AccessDeniedException"
|
||||
} ]
|
||||
},
|
||||
"CertificateAuthorityCSRCreated" : {
|
||||
"description" : "Wait until a Certificate Authority CSR is created",
|
||||
"delay" : 3,
|
||||
"maxAttempts" : 60,
|
||||
"operation" : "GetCertificateAuthorityCsr",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "error",
|
||||
"state" : "success",
|
||||
"expected" : false
|
||||
}, {
|
||||
"matcher" : "error",
|
||||
"state" : "retry",
|
||||
"expected" : "RequestInProgressException"
|
||||
}, {
|
||||
"matcher" : "error",
|
||||
"state" : "failure",
|
||||
"expected" : "AccessDeniedException"
|
||||
} ]
|
||||
},
|
||||
"CertificateIssued" : {
|
||||
"description" : "Wait until a certificate is issued",
|
||||
"delay" : 1,
|
||||
"maxAttempts" : 60,
|
||||
"operation" : "GetCertificate",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "error",
|
||||
"state" : "success",
|
||||
"expected" : false
|
||||
}, {
|
||||
"matcher" : "error",
|
||||
"state" : "retry",
|
||||
"expected" : "RequestInProgressException"
|
||||
}, {
|
||||
"matcher" : "error",
|
||||
"state" : "failure",
|
||||
"expected" : "AccessDeniedException"
|
||||
} ]
|
||||
}
|
||||
}
|
||||
}
|
||||
LFS
BIN
Binary file not shown.
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+10
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListCertificates": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxItems",
|
||||
"result_key": "CertificateSummaryList"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"version" : 2,
|
||||
"waiters" : {
|
||||
"CertificateValidated" : {
|
||||
"delay" : 60,
|
||||
"maxAttempts" : 5,
|
||||
"operation" : "DescribeCertificate",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "pathAll",
|
||||
"argument" : "Certificate.DomainValidationOptions[].ValidationStatus",
|
||||
"state" : "success",
|
||||
"expected" : "SUCCESS"
|
||||
}, {
|
||||
"matcher" : "pathAny",
|
||||
"argument" : "Certificate.DomainValidationOptions[].ValidationStatus",
|
||||
"state" : "retry",
|
||||
"expected" : "PENDING_VALIDATION"
|
||||
}, {
|
||||
"matcher" : "path",
|
||||
"argument" : "Certificate.Status",
|
||||
"state" : "failure",
|
||||
"expected" : "FAILED"
|
||||
}, {
|
||||
"matcher" : "error",
|
||||
"state" : "failure",
|
||||
"expected" : "ResourceNotFoundException"
|
||||
} ]
|
||||
}
|
||||
}
|
||||
}
|
||||
scripts/addons/Rokoko Libraries/python311/botocore/data/aiops/2018-05-10/endpoint-rule-set-1.json.gz
LFS
BIN
Binary file not shown.
+10
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListInvestigationGroups": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "investigationGroups"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": 2,
|
||||
"waiters": {
|
||||
}
|
||||
}
|
||||
LFS
BIN
Binary file not shown.
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+22
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListWorkspaces": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "workspaces"
|
||||
},
|
||||
"ListRuleGroupsNamespaces": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "ruleGroupsNamespaces"
|
||||
},
|
||||
"ListScrapers": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "scrapers"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
@@ -0,0 +1,76 @@
|
||||
{
|
||||
"version" : 2,
|
||||
"waiters" : {
|
||||
"ScraperActive" : {
|
||||
"description" : "Wait until a scraper reaches ACTIVE status",
|
||||
"delay" : 2,
|
||||
"maxAttempts" : 60,
|
||||
"operation" : "DescribeScraper",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "path",
|
||||
"argument" : "scraper.status.statusCode",
|
||||
"state" : "success",
|
||||
"expected" : "ACTIVE"
|
||||
}, {
|
||||
"matcher" : "path",
|
||||
"argument" : "scraper.status.statusCode",
|
||||
"state" : "failure",
|
||||
"expected" : "CREATION_FAILED"
|
||||
} ]
|
||||
},
|
||||
"ScraperDeleted" : {
|
||||
"description" : "Wait until a scraper reaches DELETED status",
|
||||
"delay" : 2,
|
||||
"maxAttempts" : 60,
|
||||
"operation" : "DescribeScraper",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "error",
|
||||
"state" : "success",
|
||||
"expected" : "ResourceNotFoundException"
|
||||
}, {
|
||||
"matcher" : "path",
|
||||
"argument" : "scraper.status.statusCode",
|
||||
"state" : "failure",
|
||||
"expected" : "DELETION_FAILED"
|
||||
} ]
|
||||
},
|
||||
"WorkspaceActive" : {
|
||||
"description" : "Wait until a workspace reaches ACTIVE status",
|
||||
"delay" : 2,
|
||||
"maxAttempts" : 60,
|
||||
"operation" : "DescribeWorkspace",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "path",
|
||||
"argument" : "workspace.status.statusCode",
|
||||
"state" : "success",
|
||||
"expected" : "ACTIVE"
|
||||
}, {
|
||||
"matcher" : "path",
|
||||
"argument" : "workspace.status.statusCode",
|
||||
"state" : "retry",
|
||||
"expected" : "UPDATING"
|
||||
}, {
|
||||
"matcher" : "path",
|
||||
"argument" : "workspace.status.statusCode",
|
||||
"state" : "retry",
|
||||
"expected" : "CREATING"
|
||||
} ]
|
||||
},
|
||||
"WorkspaceDeleted" : {
|
||||
"description" : "Wait until a workspace reaches DELETED status",
|
||||
"delay" : 2,
|
||||
"maxAttempts" : 60,
|
||||
"operation" : "DescribeWorkspace",
|
||||
"acceptors" : [ {
|
||||
"matcher" : "error",
|
||||
"state" : "success",
|
||||
"expected" : "ResourceNotFoundException"
|
||||
}, {
|
||||
"matcher" : "path",
|
||||
"argument" : "workspace.status.statusCode",
|
||||
"state" : "retry",
|
||||
"expected" : "DELETING"
|
||||
} ]
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+28
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListApps": {
|
||||
"input_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"output_token": "nextToken",
|
||||
"result_key": "apps"
|
||||
},
|
||||
"ListBranches": {
|
||||
"input_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"output_token": "nextToken",
|
||||
"result_key": "branches"
|
||||
},
|
||||
"ListDomainAssociations": {
|
||||
"input_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"output_token": "nextToken",
|
||||
"result_key": "domainAssociations"
|
||||
},
|
||||
"ListJobs": {
|
||||
"input_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"output_token": "nextToken",
|
||||
"result_key": "jobSummaries"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+10
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListBackendJobs": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Jobs"
|
||||
}
|
||||
}
|
||||
}
|
||||
LFS
BIN
Binary file not shown.
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+43
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListComponents": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "entities"
|
||||
},
|
||||
"ListThemes": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "entities"
|
||||
},
|
||||
"ExportComponents": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"result_key": "entities"
|
||||
},
|
||||
"ExportThemes": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"result_key": "entities"
|
||||
},
|
||||
"ExportForms": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"result_key": "entities"
|
||||
},
|
||||
"ListForms": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "entities"
|
||||
},
|
||||
"ListCodegenJobs": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "entities"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": 2,
|
||||
"waiters": {
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+117
@@ -0,0 +1,117 @@
|
||||
{
|
||||
"pagination": {
|
||||
"GetApiKeys": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetBasePathMappings": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetClientCertificates": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetDeployments": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetDomainNames": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetModels": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetResources": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetRestApis": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetUsage": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items",
|
||||
"non_aggregate_keys": [
|
||||
"usagePlanId",
|
||||
"startDate",
|
||||
"endDate"
|
||||
]
|
||||
},
|
||||
"GetUsagePlans": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetUsagePlanKeys": {
|
||||
"input_token": "position",
|
||||
"output_token": "position",
|
||||
"limit_key": "limit",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetVpcLinks": {
|
||||
"input_token": "position",
|
||||
"limit_key": "limit",
|
||||
"output_token": "position",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetAuthorizers": {
|
||||
"input_token": "position",
|
||||
"limit_key": "limit",
|
||||
"output_token": "position",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetDocumentationParts": {
|
||||
"input_token": "position",
|
||||
"limit_key": "limit",
|
||||
"output_token": "position",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetDocumentationVersions": {
|
||||
"input_token": "position",
|
||||
"limit_key": "limit",
|
||||
"output_token": "position",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetGatewayResponses": {
|
||||
"input_token": "position",
|
||||
"limit_key": "limit",
|
||||
"output_token": "position",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetRequestValidators": {
|
||||
"input_token": "position",
|
||||
"limit_key": "limit",
|
||||
"output_token": "position",
|
||||
"result_key": "items"
|
||||
},
|
||||
"GetSdkTypes": {
|
||||
"input_token": "position",
|
||||
"limit_key": "limit",
|
||||
"output_token": "position",
|
||||
"result_key": "items"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+3
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"pagination": {}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+70
@@ -0,0 +1,70 @@
|
||||
{
|
||||
"pagination": {
|
||||
"GetApis": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetAuthorizers": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetDeployments": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetDomainNames": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetIntegrationResponses": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetIntegrations": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetModels": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetRouteResponses": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetRoutes": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"GetStages": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListRoutingRules": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "RoutingRules"
|
||||
}
|
||||
}
|
||||
}
|
||||
LFS
BIN
Binary file not shown.
BIN
Binary file not shown.
+720
@@ -0,0 +1,720 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
"CreateApplication": [
|
||||
{
|
||||
"input": {
|
||||
"Description": "An application used for creating an example.",
|
||||
"Name": "example-application"
|
||||
},
|
||||
"output": {
|
||||
"Description": "An application used for creating an example.",
|
||||
"Id": "339ohji",
|
||||
"Name": "example-application"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following create-application example creates an application in AWS AppConfig.",
|
||||
"id": "to-create-an-application-1632264511615",
|
||||
"title": "To create an application"
|
||||
}
|
||||
],
|
||||
"CreateConfigurationProfile": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"LocationUri": "ssm-parameter://Example-Parameter",
|
||||
"Name": "Example-Configuration-Profile",
|
||||
"RetrievalRoleArn": "arn:aws:iam::111122223333:role/Example-App-Config-Role"
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Id": "ur8hx2f",
|
||||
"LocationUri": "ssm-parameter://Example-Parameter",
|
||||
"Name": "Example-Configuration-Profile",
|
||||
"RetrievalRoleArn": "arn:aws:iam::111122223333:role/Example-App-Config-Role"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following create-configuration-profile example creates a configuration profile using a configuration stored in Parameter Store, a capability of Systems Manager.",
|
||||
"id": "to-create-a-configuration-profile-1632264580336",
|
||||
"title": "To create a configuration profile"
|
||||
}
|
||||
],
|
||||
"CreateDeploymentStrategy": [
|
||||
{
|
||||
"input": {
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"GrowthFactor": 25,
|
||||
"Name": "Example-Deployment",
|
||||
"ReplicateTo": "SSM_DOCUMENT"
|
||||
},
|
||||
"output": {
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"FinalBakeTimeInMinutes": 0,
|
||||
"GrowthFactor": 25,
|
||||
"GrowthType": "LINEAR",
|
||||
"Id": "1225qzk",
|
||||
"Name": "Example-Deployment",
|
||||
"ReplicateTo": "SSM_DOCUMENT"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following create-deployment-strategy example creates a deployment strategy called Example-Deployment that takes 15 minutes and deploys the configuration to 25% of the application at a time. The strategy is also copied to an SSM Document.",
|
||||
"id": "to-create-a-deployment-strategy-1632264783812",
|
||||
"title": "To create a deployment strategy"
|
||||
}
|
||||
],
|
||||
"CreateEnvironment": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Name": "Example-Environment"
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Id": "54j1r29",
|
||||
"Name": "Example-Environment",
|
||||
"State": "READY_FOR_DEPLOYMENT"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following create-environment example creates an AWS AppConfig environment named Example-Environment using the application you created using create-application",
|
||||
"id": "to-create-an-environment-1632265124975",
|
||||
"title": "To create an environment"
|
||||
}
|
||||
],
|
||||
"CreateHostedConfigurationVersion": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"Content": "eyAiTmFtZSI6ICJFeGFtcGxlQXBwbGljYXRpb24iLCAiSWQiOiBFeGFtcGxlSUQsICJSYW5rIjogNyB9",
|
||||
"ContentType": "text",
|
||||
"LatestVersionNumber": 1
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"ContentType": "text",
|
||||
"VersionNumber": 1
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following create-hosted-configuration-version example creates a new configuration in the AWS AppConfig configuration store.",
|
||||
"id": "to-create-a-hosted-configuration-version-1632265196980",
|
||||
"title": "To create a hosted configuration version"
|
||||
}
|
||||
],
|
||||
"DeleteApplication": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following delete-application example deletes the specified application. \n",
|
||||
"id": "to-delete-an-application-1632265343951",
|
||||
"title": "To delete an application"
|
||||
}
|
||||
],
|
||||
"DeleteConfigurationProfile": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following delete-configuration-profile example deletes the specified configuration profile.",
|
||||
"id": "to-delete-a-configuration-profile-1632265401308",
|
||||
"title": "To delete a configuration profile"
|
||||
}
|
||||
],
|
||||
"DeleteDeploymentStrategy": [
|
||||
{
|
||||
"input": {
|
||||
"DeploymentStrategyId": "1225qzk"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following delete-deployment-strategy example deletes the specified deployment strategy.",
|
||||
"id": "to-delete-a-deployment-strategy-1632265473708",
|
||||
"title": "To delete a deployment strategy"
|
||||
}
|
||||
],
|
||||
"DeleteEnvironment": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"EnvironmentId": "54j1r29"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following delete-environment example deletes the specified application environment.",
|
||||
"id": "to-delete-an-environment-1632265641044",
|
||||
"title": "To delete an environment"
|
||||
}
|
||||
],
|
||||
"DeleteHostedConfigurationVersion": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"VersionNumber": 1
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following delete-hosted-configuration-version example deletes a configuration version hosted in the AWS AppConfig configuration store.",
|
||||
"id": "to-delete-a-hosted-configuration-version-1632265720740",
|
||||
"title": "To delete a hosted configuration version"
|
||||
}
|
||||
],
|
||||
"GetApplication": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji"
|
||||
},
|
||||
"output": {
|
||||
"Id": "339ohji",
|
||||
"Name": "example-application"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following get-application example lists the details of the specified application.",
|
||||
"id": "to-list-details-of-an-application-1632265864702",
|
||||
"title": "To list details of an application"
|
||||
}
|
||||
],
|
||||
"GetConfiguration": [
|
||||
{
|
||||
"input": {
|
||||
"Application": "example-application",
|
||||
"ClientId": "example-id",
|
||||
"Configuration": "Example-Configuration-Profile",
|
||||
"Environment": "Example-Environment"
|
||||
},
|
||||
"output": {
|
||||
"ConfigurationVersion": "1",
|
||||
"ContentType": "application/octet-stream"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following get-configuration example returns the configuration details of the example application. On subsequent calls to get-configuration, use the client-configuration-version parameter to only update the configuration of your application if the version has changed. Only updating the configuration when the version has changed avoids excess charges incurred by calling get-configuration.",
|
||||
"id": "to-retrieve-configuration-details-1632265954314",
|
||||
"title": "To retrieve configuration details"
|
||||
}
|
||||
],
|
||||
"GetConfigurationProfile": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f"
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Id": "ur8hx2f",
|
||||
"LocationUri": "ssm-parameter://Example-Parameter",
|
||||
"Name": "Example-Configuration-Profile",
|
||||
"RetrievalRoleArn": "arn:aws:iam::111122223333:role/Example-App-Config-Role"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following get-configuration-profile example returns the details of the specified configuration profile.",
|
||||
"id": "to-retrieve-configuration-profile-details-1632266081013",
|
||||
"title": "To retrieve configuration profile details"
|
||||
}
|
||||
],
|
||||
"GetDeployment": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"DeploymentNumber": 1,
|
||||
"EnvironmentId": "54j1r29"
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"CompletedAt": "2021-09-17T21:59:03.888000+00:00",
|
||||
"ConfigurationLocationUri": "ssm-parameter://Example-Parameter",
|
||||
"ConfigurationName": "Example-Configuration-Profile",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"ConfigurationVersion": "1",
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"DeploymentNumber": 1,
|
||||
"DeploymentStrategyId": "1225qzk",
|
||||
"EnvironmentId": "54j1r29",
|
||||
"EventLog": [
|
||||
{
|
||||
"Description": "Deployment completed",
|
||||
"EventType": "DEPLOYMENT_COMPLETED",
|
||||
"OccurredAt": "2021-09-17T21:59:03.888000+00:00",
|
||||
"TriggeredBy": "APPCONFIG"
|
||||
},
|
||||
{
|
||||
"Description": "Deployment bake time started",
|
||||
"EventType": "BAKE_TIME_STARTED",
|
||||
"OccurredAt": "2021-09-17T21:58:57.722000+00:00",
|
||||
"TriggeredBy": "APPCONFIG"
|
||||
},
|
||||
{
|
||||
"Description": "Configuration available to 100.00% of clients",
|
||||
"EventType": "PERCENTAGE_UPDATED",
|
||||
"OccurredAt": "2021-09-17T21:55:56.816000+00:00",
|
||||
"TriggeredBy": "APPCONFIG"
|
||||
},
|
||||
{
|
||||
"Description": "Configuration available to 75.00% of clients",
|
||||
"EventType": "PERCENTAGE_UPDATED",
|
||||
"OccurredAt": "2021-09-17T21:52:56.567000+00:00",
|
||||
"TriggeredBy": "APPCONFIG"
|
||||
},
|
||||
{
|
||||
"Description": "Configuration available to 50.00% of clients",
|
||||
"EventType": "PERCENTAGE_UPDATED",
|
||||
"OccurredAt": "2021-09-17T21:49:55.737000+00:00",
|
||||
"TriggeredBy": "APPCONFIG"
|
||||
},
|
||||
{
|
||||
"Description": "Configuration available to 25.00% of clients",
|
||||
"EventType": "PERCENTAGE_UPDATED",
|
||||
"OccurredAt": "2021-09-17T21:46:55.187000+00:00",
|
||||
"TriggeredBy": "APPCONFIG"
|
||||
},
|
||||
{
|
||||
"Description": "Deployment started",
|
||||
"EventType": "DEPLOYMENT_STARTED",
|
||||
"OccurredAt": "2021-09-17T21:43:54.205000+00:00",
|
||||
"TriggeredBy": "USER"
|
||||
}
|
||||
],
|
||||
"FinalBakeTimeInMinutes": 0,
|
||||
"GrowthFactor": 25,
|
||||
"GrowthType": "LINEAR",
|
||||
"PercentageComplete": 100,
|
||||
"StartedAt": "2021-09-17T21:43:54.205000+00:00",
|
||||
"State": "COMPLETE"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following get-deployment example lists details of the deployment to the application in the specified environment and deployment.",
|
||||
"id": "to-retrieve-deployment-details-1633976766883",
|
||||
"title": "To retrieve deployment details"
|
||||
}
|
||||
],
|
||||
"GetDeploymentStrategy": [
|
||||
{
|
||||
"input": {
|
||||
"DeploymentStrategyId": "1225qzk"
|
||||
},
|
||||
"output": {
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"FinalBakeTimeInMinutes": 0,
|
||||
"GrowthFactor": 25,
|
||||
"GrowthType": "LINEAR",
|
||||
"Id": "1225qzk",
|
||||
"Name": "Example-Deployment",
|
||||
"ReplicateTo": "SSM_DOCUMENT"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following get-deployment-strategy example lists the details of the specified deployment strategy.",
|
||||
"id": "to-retrieve-details-of-a-deployment-strategy-1632266385805",
|
||||
"title": "To retrieve details of a deployment strategy"
|
||||
}
|
||||
],
|
||||
"GetEnvironment": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"EnvironmentId": "54j1r29"
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Id": "54j1r29",
|
||||
"Name": "Example-Environment",
|
||||
"State": "READY_FOR_DEPLOYMENT"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following get-environment example returns the details and state of the specified environment.",
|
||||
"id": "to-retrieve-environment-details-1632266924806",
|
||||
"title": "To retrieve environment details"
|
||||
}
|
||||
],
|
||||
"GetHostedConfigurationVersion": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"VersionNumber": 1
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"ContentType": "application/json",
|
||||
"VersionNumber": 1
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following get-hosted-configuration-version example retrieves the configuration details of the AWS AppConfig hosted configuration.",
|
||||
"id": "to-retrieve-hosted-configuration-details-1632267003527",
|
||||
"title": "To retrieve hosted configuration details"
|
||||
}
|
||||
],
|
||||
"ListApplications": [
|
||||
{
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
"Items": [
|
||||
{
|
||||
"Description": "An application used for creating an example.",
|
||||
"Id": "339ohji",
|
||||
"Name": "test-application"
|
||||
},
|
||||
{
|
||||
"Id": "rwalwu7",
|
||||
"Name": "Test-Application"
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following list-applications example lists the available applications in your AWS account.",
|
||||
"id": "to-list-the-available-applications-1632267111131",
|
||||
"title": "To list the available applications"
|
||||
}
|
||||
],
|
||||
"ListConfigurationProfiles": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji"
|
||||
},
|
||||
"output": {
|
||||
"Items": [
|
||||
{
|
||||
"ApplicationId": "339ohji",
|
||||
"Id": "ur8hx2f",
|
||||
"LocationUri": "ssm-parameter://Example-Parameter",
|
||||
"Name": "Example-Configuration-Profile"
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following list-configuration-profiles example lists the available configuration profiles for the specified application.",
|
||||
"id": "to-list-the-available-configuration-profiles-1632267193265",
|
||||
"title": "To list the available configuration profiles"
|
||||
}
|
||||
],
|
||||
"ListDeploymentStrategies": [
|
||||
{
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
"Items": [
|
||||
{
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"FinalBakeTimeInMinutes": 0,
|
||||
"GrowthFactor": 25,
|
||||
"GrowthType": "LINEAR",
|
||||
"Id": "1225qzk",
|
||||
"Name": "Example-Deployment",
|
||||
"ReplicateTo": "SSM_DOCUMENT"
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following list-deployment-strategies example lists the available deployment strategies in your AWS account.",
|
||||
"id": "to-list-the-available-deployment-strategies-1632267364180",
|
||||
"title": "To list the available deployment strategies"
|
||||
}
|
||||
],
|
||||
"ListDeployments": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"EnvironmentId": "54j1r29"
|
||||
},
|
||||
"output": {
|
||||
"Items": [
|
||||
{
|
||||
"CompletedAt": "2021-09-17T21:59:03.888000+00:00",
|
||||
"ConfigurationName": "Example-Configuration-Profile",
|
||||
"ConfigurationVersion": "1",
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"DeploymentNumber": 1,
|
||||
"FinalBakeTimeInMinutes": 0,
|
||||
"GrowthFactor": 25,
|
||||
"GrowthType": "LINEAR",
|
||||
"PercentageComplete": 100,
|
||||
"StartedAt": "2021-09-17T21:43:54.205000+00:00",
|
||||
"State": "COMPLETE"
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following list-deployments example lists the available deployments in your AWS account for the specified application and environment.",
|
||||
"id": "to-list-the-available-deployments-1632267282025",
|
||||
"title": "To list the available deployments"
|
||||
}
|
||||
],
|
||||
"ListEnvironments": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji"
|
||||
},
|
||||
"output": {
|
||||
"Items": [
|
||||
{
|
||||
"ApplicationId": "339ohji",
|
||||
"Id": "54j1r29",
|
||||
"Name": "Example-Environment",
|
||||
"State": "READY_FOR_DEPLOYMENT"
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following list-environments example lists the available environments in your AWS account for the specified application.",
|
||||
"id": "to-list-the-available-environments-1632267474389",
|
||||
"title": "To list the available environments"
|
||||
}
|
||||
],
|
||||
"ListHostedConfigurationVersions": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f"
|
||||
},
|
||||
"output": {
|
||||
"Items": [
|
||||
{
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"ContentType": "application/json",
|
||||
"VersionNumber": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following list-hosted-configuration-versions example lists the configurations versions hosted in the AWS AppConfig hosted configuration store for the specified application and configuration profile.",
|
||||
"id": "to-list-the-available-hosted-configuration-versions-1632267647667",
|
||||
"title": "To list the available hosted configuration versions"
|
||||
}
|
||||
],
|
||||
"ListTagsForResource": [
|
||||
{
|
||||
"input": {
|
||||
"ResourceArn": "arn:aws:appconfig:us-east-1:111122223333:application/339ohji"
|
||||
},
|
||||
"output": {
|
||||
"Tags": {
|
||||
"group1": "1"
|
||||
}
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following list-tags-for-resource example lists the tags of a specified application.",
|
||||
"id": "to-list-the-tags-of-an-application-1632328796560",
|
||||
"title": "To list the tags of an application"
|
||||
}
|
||||
],
|
||||
"StartDeployment": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"ConfigurationVersion": "1",
|
||||
"DeploymentStrategyId": "1225qzk",
|
||||
"Description": "",
|
||||
"EnvironmentId": "54j1r29",
|
||||
"Tags": {
|
||||
}
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationLocationUri": "ssm-parameter://Example-Parameter",
|
||||
"ConfigurationName": "Example-Configuration-Profile",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"ConfigurationVersion": "1",
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"DeploymentNumber": 1,
|
||||
"DeploymentStrategyId": "1225qzk",
|
||||
"EnvironmentId": "54j1r29",
|
||||
"EventLog": [
|
||||
{
|
||||
"Description": "Deployment started",
|
||||
"EventType": "DEPLOYMENT_STARTED",
|
||||
"OccurredAt": "2021-09-17T21:43:54.205000+00:00",
|
||||
"TriggeredBy": "USER"
|
||||
}
|
||||
],
|
||||
"FinalBakeTimeInMinutes": 0,
|
||||
"GrowthFactor": 25,
|
||||
"GrowthType": "LINEAR",
|
||||
"PercentageComplete": 1.0,
|
||||
"StartedAt": "2021-09-17T21:43:54.205000+00:00",
|
||||
"State": "DEPLOYING"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following start-deployment example starts a deployment to the application using the specified environment, deployment strategy, and configuration profile.",
|
||||
"id": "to-start-a-configuration-deployment-1632328956790",
|
||||
"title": "To start a configuration deployment"
|
||||
}
|
||||
],
|
||||
"StopDeployment": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"DeploymentNumber": 2,
|
||||
"EnvironmentId": "54j1r29"
|
||||
},
|
||||
"output": {
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"DeploymentNumber": 2,
|
||||
"FinalBakeTimeInMinutes": 0,
|
||||
"GrowthFactor": 25.0,
|
||||
"PercentageComplete": 1.0
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following stop-deployment example stops the deployment of an application configuration to the specified environment.",
|
||||
"id": "to-stop-configuration-deployment-1632329139126",
|
||||
"title": "To stop configuration deployment"
|
||||
}
|
||||
],
|
||||
"TagResource": [
|
||||
{
|
||||
"input": {
|
||||
"ResourceArn": "arn:aws:appconfig:us-east-1:111122223333:application/339ohji",
|
||||
"Tags": {
|
||||
"group1": "1"
|
||||
}
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following tag-resource example tags an application resource.",
|
||||
"id": "to-tag-an-application-1632330350645",
|
||||
"title": "To tag an application"
|
||||
}
|
||||
],
|
||||
"UntagResource": [
|
||||
{
|
||||
"input": {
|
||||
"ResourceArn": "arn:aws:appconfig:us-east-1:111122223333:application/339ohji",
|
||||
"TagKeys": [
|
||||
"group1"
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following untag-resource example removes the group1 tag from the specified application.",
|
||||
"id": "to-remove-a-tag-from-an-application-1632330429881",
|
||||
"title": "To remove a tag from an application"
|
||||
}
|
||||
],
|
||||
"UpdateApplication": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Description": "",
|
||||
"Name": "Example-Application"
|
||||
},
|
||||
"output": {
|
||||
"Description": "An application used for creating an example.",
|
||||
"Id": "339ohji",
|
||||
"Name": "Example-Application"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following update-application example updates the name of the specified application.",
|
||||
"id": "to-update-an-application-1632330585893",
|
||||
"title": "To update an application"
|
||||
}
|
||||
],
|
||||
"UpdateConfigurationProfile": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"Description": "Configuration profile used for examples."
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Description": "Configuration profile used for examples.",
|
||||
"Id": "ur8hx2f",
|
||||
"LocationUri": "ssm-parameter://Example-Parameter",
|
||||
"Name": "Example-Configuration-Profile",
|
||||
"RetrievalRoleArn": "arn:aws:iam::111122223333:role/Example-App-Config-Role"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following update-configuration-profile example updates the description of the specified configuration profile.",
|
||||
"id": "to-update-a-configuration-profile-1632330721974",
|
||||
"title": "To update a configuration profile"
|
||||
}
|
||||
],
|
||||
"UpdateDeploymentStrategy": [
|
||||
{
|
||||
"input": {
|
||||
"DeploymentStrategyId": "1225qzk",
|
||||
"FinalBakeTimeInMinutes": 20
|
||||
},
|
||||
"output": {
|
||||
"DeploymentDurationInMinutes": 15,
|
||||
"FinalBakeTimeInMinutes": 20,
|
||||
"GrowthFactor": 25,
|
||||
"GrowthType": "LINEAR",
|
||||
"Id": "1225qzk",
|
||||
"Name": "Example-Deployment",
|
||||
"ReplicateTo": "SSM_DOCUMENT"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following update-deployment-strategy example updates final bake time to 20 minutes in the specified deployment strategy. ::\n",
|
||||
"id": "to-update-a-deployment-strategy-1632330896602",
|
||||
"title": "To update a deployment strategy"
|
||||
}
|
||||
],
|
||||
"UpdateEnvironment": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Description": "An environment for examples.",
|
||||
"EnvironmentId": "54j1r29"
|
||||
},
|
||||
"output": {
|
||||
"ApplicationId": "339ohji",
|
||||
"Description": "An environment for examples.",
|
||||
"Id": "54j1r29",
|
||||
"Name": "Example-Environment",
|
||||
"State": "ROLLED_BACK"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following update-environment example updates an environment's description.",
|
||||
"id": "to-update-an-environment-1632331382428",
|
||||
"title": "To update an environment"
|
||||
}
|
||||
],
|
||||
"ValidateConfiguration": [
|
||||
{
|
||||
"input": {
|
||||
"ApplicationId": "abc1234",
|
||||
"ConfigurationProfileId": "ur8hx2f",
|
||||
"ConfigurationVersion": "1"
|
||||
},
|
||||
"comments": {
|
||||
},
|
||||
"description": "The following validate-configuration example uses the validators in a configuration profile to validate a configuration.",
|
||||
"id": "to-validate-a-configuration-1632331491365",
|
||||
"title": "To validate a configuration"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
+52
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListApplications": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListConfigurationProfiles": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListDeploymentStrategies": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListDeployments": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListEnvironments": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListExtensionAssociations": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListExtensions": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
},
|
||||
"ListHostedConfigurationVersions": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "Items"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+55
@@ -0,0 +1,55 @@
|
||||
{
|
||||
"version": 2,
|
||||
"waiters": {
|
||||
"EnvironmentReadyForDeployment": {
|
||||
"operation": "GetEnvironment",
|
||||
"delay": 30,
|
||||
"maxAttempts": 999,
|
||||
"acceptors": [
|
||||
{
|
||||
"state": "success",
|
||||
"matcher": "path",
|
||||
"argument": "State",
|
||||
"expected": "ReadyForDeployment"
|
||||
},
|
||||
{
|
||||
"state": "failure",
|
||||
"matcher": "path",
|
||||
"argument": "State",
|
||||
"expected": "RolledBack"
|
||||
},
|
||||
{
|
||||
"state": "failure",
|
||||
"matcher": "path",
|
||||
"argument": "State",
|
||||
"expected": "Reverted"
|
||||
}
|
||||
]
|
||||
},
|
||||
"DeploymentComplete": {
|
||||
"operation": "GetDeployment",
|
||||
"delay": 30,
|
||||
"maxAttempts": 999,
|
||||
"acceptors": [
|
||||
{
|
||||
"state": "success",
|
||||
"matcher": "path",
|
||||
"argument": "State",
|
||||
"expected": "COMPLETE"
|
||||
},
|
||||
{
|
||||
"state": "failure",
|
||||
"matcher": "path",
|
||||
"argument": "State",
|
||||
"expected": "ROLLED_BACK"
|
||||
},
|
||||
{
|
||||
"state": "failure",
|
||||
"matcher": "path",
|
||||
"argument": "State",
|
||||
"expected": "REVERTED"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"pagination": {}
|
||||
}
|
||||
LFS
BIN
Binary file not shown.
BIN
Binary file not shown.
+28
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListAppAuthorizations": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "appAuthorizationSummaryList"
|
||||
},
|
||||
"ListAppBundles": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "appBundleSummaryList"
|
||||
},
|
||||
"ListIngestionDestinations": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "ingestionDestinations"
|
||||
},
|
||||
"ListIngestions": {
|
||||
"input_token": "nextToken",
|
||||
"output_token": "nextToken",
|
||||
"limit_key": "maxResults",
|
||||
"result_key": "ingestions"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": 2,
|
||||
"waiters": {
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"pagination": {}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+40
@@ -0,0 +1,40 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListApplications": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "Applications"
|
||||
},
|
||||
"ListDataIntegrationAssociations": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "DataIntegrationAssociations"
|
||||
},
|
||||
"ListDataIntegrations": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "DataIntegrations"
|
||||
},
|
||||
"ListEventIntegrationAssociations": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "EventIntegrationAssociations"
|
||||
},
|
||||
"ListEventIntegrations": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "EventIntegrations"
|
||||
},
|
||||
"ListApplicationAssociations": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "ApplicationAssociations"
|
||||
}
|
||||
}
|
||||
}
|
||||
scripts/addons/Rokoko Libraries/python311/botocore/data/appintegrations/2020-07-29/service-2.json.gz
LFS
BIN
Binary file not shown.
BIN
Binary file not shown.
+221
@@ -0,0 +1,221 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
"DeleteScalingPolicy": [
|
||||
{
|
||||
"input": {
|
||||
"PolicyName": "web-app-cpu-lt-25",
|
||||
"ResourceId": "service/default/web-app",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs"
|
||||
},
|
||||
"output": {
|
||||
},
|
||||
"comments": {
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
}
|
||||
},
|
||||
"description": "This example deletes a scaling policy for the Amazon ECS service called web-app, which is running in the default cluster.",
|
||||
"id": "to-delete-a-scaling-policy-1470863892689",
|
||||
"title": "To delete a scaling policy"
|
||||
}
|
||||
],
|
||||
"DeregisterScalableTarget": [
|
||||
{
|
||||
"input": {
|
||||
"ResourceId": "service/default/web-app",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs"
|
||||
},
|
||||
"output": {
|
||||
},
|
||||
"comments": {
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
}
|
||||
},
|
||||
"description": "This example deregisters a scalable target for an Amazon ECS service called web-app that is running in the default cluster.",
|
||||
"id": "to-deregister-a-scalable-target-1470864164895",
|
||||
"title": "To deregister a scalable target"
|
||||
}
|
||||
],
|
||||
"DescribeScalableTargets": [
|
||||
{
|
||||
"input": {
|
||||
"ServiceNamespace": "ecs"
|
||||
},
|
||||
"output": {
|
||||
"ScalableTargets": [
|
||||
{
|
||||
"CreationTime": "2019-05-06T11:21:46.199Z",
|
||||
"MaxCapacity": 10,
|
||||
"MinCapacity": 1,
|
||||
"ResourceId": "service/default/web-app",
|
||||
"RoleARN": "arn:aws:iam::012345678910:role/aws-service-role/ecs.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_ECSService",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs",
|
||||
"SuspendedState": {
|
||||
"DynamicScalingInSuspended": false,
|
||||
"DynamicScalingOutSuspended": false,
|
||||
"ScheduledScalingSuspended": false
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
}
|
||||
},
|
||||
"description": "This example describes the scalable targets for the ECS service namespace.",
|
||||
"id": "to-describe-scalable-targets-1470864286961",
|
||||
"title": "To describe scalable targets"
|
||||
}
|
||||
],
|
||||
"DescribeScalingActivities": [
|
||||
{
|
||||
"input": {
|
||||
"ResourceId": "service/default/web-app",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs"
|
||||
},
|
||||
"output": {
|
||||
"ScalingActivities": [
|
||||
{
|
||||
"ActivityId": "e6c5f7d1-dbbb-4a3f-89b2-51f33e766399",
|
||||
"Cause": "monitor alarm web-app-cpu-lt-25 in state ALARM triggered policy web-app-cpu-lt-25",
|
||||
"Description": "Setting desired count to 1.",
|
||||
"EndTime": "2019-05-06T16:04:32.111Z",
|
||||
"ResourceId": "service/default/web-app",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs",
|
||||
"StartTime": "2019-05-06T16:03:58.171Z",
|
||||
"StatusCode": "Successful",
|
||||
"StatusMessage": "Successfully set desired count to 1. Change successfully fulfilled by ecs."
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
}
|
||||
},
|
||||
"description": "This example describes the scaling activities for an Amazon ECS service called web-app that is running in the default cluster.",
|
||||
"id": "to-describe-scaling-activities-for-a-scalable-target-1470864398629",
|
||||
"title": "To describe scaling activities for a scalable target"
|
||||
}
|
||||
],
|
||||
"DescribeScalingPolicies": [
|
||||
{
|
||||
"input": {
|
||||
"ServiceNamespace": "ecs"
|
||||
},
|
||||
"output": {
|
||||
"NextToken": "",
|
||||
"ScalingPolicies": [
|
||||
{
|
||||
"Alarms": [
|
||||
{
|
||||
"AlarmARN": "arn:aws:cloudwatch:us-west-2:012345678910:alarm:web-app-cpu-gt-75",
|
||||
"AlarmName": "web-app-cpu-gt-75"
|
||||
}
|
||||
],
|
||||
"CreationTime": "2019-05-06T12:11:39.230Z",
|
||||
"PolicyARN": "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/web-app-cpu-gt-75",
|
||||
"PolicyName": "web-app-cpu-gt-75",
|
||||
"PolicyType": "StepScaling",
|
||||
"ResourceId": "service/default/web-app",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs",
|
||||
"StepScalingPolicyConfiguration": {
|
||||
"AdjustmentType": "PercentChangeInCapacity",
|
||||
"Cooldown": 60,
|
||||
"StepAdjustments": [
|
||||
{
|
||||
"MetricIntervalLowerBound": 0,
|
||||
"ScalingAdjustment": 200
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"comments": {
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
}
|
||||
},
|
||||
"description": "This example describes the scaling policies for the ECS service namespace.",
|
||||
"id": "to-describe-scaling-policies-1470864609734",
|
||||
"title": "To describe scaling policies"
|
||||
}
|
||||
],
|
||||
"PutScalingPolicy": [
|
||||
{
|
||||
"input": {
|
||||
"PolicyName": "cpu75-target-tracking-scaling-policy",
|
||||
"PolicyType": "TargetTrackingScaling",
|
||||
"ResourceId": "service/default/web-app",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs",
|
||||
"TargetTrackingScalingPolicyConfiguration": {
|
||||
"PredefinedMetricSpecification": {
|
||||
"PredefinedMetricType": "ECSServiceAverageCPUUtilization"
|
||||
},
|
||||
"ScaleInCooldown": 60,
|
||||
"ScaleOutCooldown": 60,
|
||||
"TargetValue": 75
|
||||
}
|
||||
},
|
||||
"output": {
|
||||
"Alarms": [
|
||||
{
|
||||
"AlarmARN": "arn:aws:cloudwatch:us-west-2:012345678910:alarm:TargetTracking-service/default/web-app-AlarmHigh-d4f0770c-b46e-434a-a60f-3b36d653feca",
|
||||
"AlarmName": "TargetTracking-service/default/web-app-AlarmHigh-d4f0770c-b46e-434a-a60f-3b36d653feca"
|
||||
},
|
||||
{
|
||||
"AlarmARN": "arn:aws:cloudwatch:us-west-2:012345678910:alarm:TargetTracking-service/default/web-app-AlarmLow-1b437334-d19b-4a63-a812-6c67aaf2910d",
|
||||
"AlarmName": "TargetTracking-service/default/web-app-AlarmLow-1b437334-d19b-4a63-a812-6c67aaf2910d"
|
||||
}
|
||||
],
|
||||
"PolicyARN": "arn:aws:autoscaling:us-west-2:012345678910:scalingPolicy:6d8972f3-efc8-437c-92d1-6270f29a66e7:resource/ecs/service/default/web-app:policyName/cpu75-target-tracking-scaling-policy"
|
||||
},
|
||||
"comments": {
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
}
|
||||
},
|
||||
"description": "The following example applies a target tracking scaling policy with a predefined metric specification to an Amazon ECS service called web-app in the default cluster. The policy keeps the average CPU utilization of the service at 75 percent, with scale-out and scale-in cooldown periods of 60 seconds.",
|
||||
"id": "to-apply-a-target-tracking-scaling-policy-with-a-predefined-metric-specification-1569364247984",
|
||||
"title": "To apply a target tracking scaling policy with a predefined metric specification"
|
||||
}
|
||||
],
|
||||
"RegisterScalableTarget": [
|
||||
{
|
||||
"input": {
|
||||
"MaxCapacity": 10,
|
||||
"MinCapacity": 1,
|
||||
"ResourceId": "service/default/web-app",
|
||||
"ScalableDimension": "ecs:service:DesiredCount",
|
||||
"ServiceNamespace": "ecs"
|
||||
},
|
||||
"comments": {
|
||||
"input": {
|
||||
},
|
||||
"output": {
|
||||
}
|
||||
},
|
||||
"description": "This example registers a scalable target from an Amazon ECS service called web-app that is running on the default cluster, with a minimum desired count of 1 task and a maximum desired count of 10 tasks.",
|
||||
"id": "to-register-a-new-scalable-target-1470864910380",
|
||||
"title": "To register an ECS service as a scalable target"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
+28
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"pagination": {
|
||||
"DescribeScalableTargets": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ScalableTargets"
|
||||
},
|
||||
"DescribeScalingActivities": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ScalingActivities"
|
||||
},
|
||||
"DescribeScalingPolicies": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ScalingPolicies"
|
||||
},
|
||||
"DescribeScheduledActions": {
|
||||
"input_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"output_token": "NextToken",
|
||||
"result_key": "ScheduledActions"
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+5
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"examples": {
|
||||
}
|
||||
}
|
||||
+3
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"pagination": {}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
+40
@@ -0,0 +1,40 @@
|
||||
{
|
||||
"pagination": {
|
||||
"ListServiceDependencies": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ServiceDependencies"
|
||||
},
|
||||
"ListServiceDependents": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ServiceDependents"
|
||||
},
|
||||
"ListServiceLevelObjectives": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "SloSummaries"
|
||||
},
|
||||
"ListServiceOperations": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ServiceOperations"
|
||||
},
|
||||
"ListServices": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ServiceSummaries"
|
||||
},
|
||||
"ListServiceLevelObjectiveExclusionWindows": {
|
||||
"input_token": "NextToken",
|
||||
"output_token": "NextToken",
|
||||
"limit_key": "MaxResults",
|
||||
"result_key": "ExclusionWindows"
|
||||
}
|
||||
}
|
||||
}
|
||||
+31
@@ -0,0 +1,31 @@
|
||||
{
|
||||
"version": 1.0,
|
||||
"merge": {
|
||||
"pagination": {
|
||||
"ListServiceDependencies": {
|
||||
"non_aggregate_keys": [
|
||||
"StartTime",
|
||||
"EndTime"
|
||||
]
|
||||
},
|
||||
"ListServiceDependents": {
|
||||
"non_aggregate_keys": [
|
||||
"StartTime",
|
||||
"EndTime"
|
||||
]
|
||||
},
|
||||
"ListServiceOperations": {
|
||||
"non_aggregate_keys": [
|
||||
"StartTime",
|
||||
"EndTime"
|
||||
]
|
||||
},
|
||||
"ListServices": {
|
||||
"non_aggregate_keys": [
|
||||
"StartTime",
|
||||
"EndTime"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
BIN
Binary file not shown.
BIN
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user