2025-12-01
This commit is contained in:
@@ -0,0 +1 @@
|
||||
pip
|
||||
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2018 Alex Grönholm
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@@ -0,0 +1,105 @@
|
||||
Metadata-Version: 2.2
|
||||
Name: anyio
|
||||
Version: 4.9.0
|
||||
Summary: High level compatibility layer for multiple asynchronous event loop implementations
|
||||
Author-email: Alex Grönholm <alex.gronholm@nextday.fi>
|
||||
License: MIT
|
||||
Project-URL: Documentation, https://anyio.readthedocs.io/en/latest/
|
||||
Project-URL: Changelog, https://anyio.readthedocs.io/en/stable/versionhistory.html
|
||||
Project-URL: Source code, https://github.com/agronholm/anyio
|
||||
Project-URL: Issue tracker, https://github.com/agronholm/anyio/issues
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Framework :: AnyIO
|
||||
Classifier: Typing :: Typed
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.11
|
||||
Classifier: Programming Language :: Python :: 3.12
|
||||
Classifier: Programming Language :: Python :: 3.13
|
||||
Requires-Python: >=3.9
|
||||
Description-Content-Type: text/x-rst
|
||||
License-File: LICENSE
|
||||
Requires-Dist: exceptiongroup>=1.0.2; python_version < "3.11"
|
||||
Requires-Dist: idna>=2.8
|
||||
Requires-Dist: sniffio>=1.1
|
||||
Requires-Dist: typing_extensions>=4.5; python_version < "3.13"
|
||||
Provides-Extra: trio
|
||||
Requires-Dist: trio>=0.26.1; extra == "trio"
|
||||
Provides-Extra: test
|
||||
Requires-Dist: anyio[trio]; extra == "test"
|
||||
Requires-Dist: blockbuster>=1.5.23; extra == "test"
|
||||
Requires-Dist: coverage[toml]>=7; extra == "test"
|
||||
Requires-Dist: exceptiongroup>=1.2.0; extra == "test"
|
||||
Requires-Dist: hypothesis>=4.0; extra == "test"
|
||||
Requires-Dist: psutil>=5.9; extra == "test"
|
||||
Requires-Dist: pytest>=7.0; extra == "test"
|
||||
Requires-Dist: trustme; extra == "test"
|
||||
Requires-Dist: truststore>=0.9.1; python_version >= "3.10" and extra == "test"
|
||||
Requires-Dist: uvloop>=0.21; (platform_python_implementation == "CPython" and platform_system != "Windows" and python_version < "3.14") and extra == "test"
|
||||
Provides-Extra: doc
|
||||
Requires-Dist: packaging; extra == "doc"
|
||||
Requires-Dist: Sphinx~=8.2; extra == "doc"
|
||||
Requires-Dist: sphinx_rtd_theme; extra == "doc"
|
||||
Requires-Dist: sphinx-autodoc-typehints>=1.2.0; extra == "doc"
|
||||
|
||||
.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg
|
||||
:target: https://github.com/agronholm/anyio/actions/workflows/test.yml
|
||||
:alt: Build Status
|
||||
.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master
|
||||
:target: https://coveralls.io/github/agronholm/anyio?branch=master
|
||||
:alt: Code Coverage
|
||||
.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest
|
||||
:target: https://anyio.readthedocs.io/en/latest/?badge=latest
|
||||
:alt: Documentation
|
||||
.. image:: https://badges.gitter.im/gitterHQ/gitter.svg
|
||||
:target: https://gitter.im/python-trio/AnyIO
|
||||
:alt: Gitter chat
|
||||
|
||||
AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or
|
||||
trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony
|
||||
with the native SC of trio itself.
|
||||
|
||||
Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or
|
||||
trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full
|
||||
refactoring necessary. It will blend in with the native libraries of your chosen backend.
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
View full documentation at: https://anyio.readthedocs.io/
|
||||
|
||||
Features
|
||||
--------
|
||||
|
||||
AnyIO offers the following functionality:
|
||||
|
||||
* Task groups (nurseries_ in trio terminology)
|
||||
* High-level networking (TCP, UDP and UNIX sockets)
|
||||
|
||||
* `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python
|
||||
3.8)
|
||||
* async/await style UDP sockets (unlike asyncio where you still have to use Transports and
|
||||
Protocols)
|
||||
|
||||
* A versatile API for byte streams and object streams
|
||||
* Inter-task synchronization and communication (locks, conditions, events, semaphores, object
|
||||
streams)
|
||||
* Worker threads
|
||||
* Subprocesses
|
||||
* Asynchronous file I/O (using worker threads)
|
||||
* Signal handling
|
||||
|
||||
AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures.
|
||||
It even works with the popular Hypothesis_ library.
|
||||
|
||||
.. _asyncio: https://docs.python.org/3/library/asyncio.html
|
||||
.. _trio: https://github.com/python-trio/trio
|
||||
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
|
||||
.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning
|
||||
.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs
|
||||
.. _pytest: https://docs.pytest.org/en/latest/
|
||||
.. _Hypothesis: https://hypothesis.works/
|
||||
@@ -0,0 +1,88 @@
|
||||
anyio-4.9.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
anyio-4.9.0.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081
|
||||
anyio-4.9.0.dist-info/METADATA,sha256=vvkWPXXTbrpTCFK7zdcYwQcSQhx6Q4qITM9t_PEQCrY,4682
|
||||
anyio-4.9.0.dist-info/RECORD,,
|
||||
anyio-4.9.0.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
|
||||
anyio-4.9.0.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39
|
||||
anyio-4.9.0.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6
|
||||
anyio/__init__.py,sha256=t8bZuNXa5ncwXBaNKbv48BDgZt48RT_zCEtrnPmjNU8,4993
|
||||
anyio/__pycache__/__init__.cpython-311.pyc,,
|
||||
anyio/__pycache__/from_thread.cpython-311.pyc,,
|
||||
anyio/__pycache__/lowlevel.cpython-311.pyc,,
|
||||
anyio/__pycache__/pytest_plugin.cpython-311.pyc,,
|
||||
anyio/__pycache__/to_interpreter.cpython-311.pyc,,
|
||||
anyio/__pycache__/to_process.cpython-311.pyc,,
|
||||
anyio/__pycache__/to_thread.cpython-311.pyc,,
|
||||
anyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
anyio/_backends/__pycache__/__init__.cpython-311.pyc,,
|
||||
anyio/_backends/__pycache__/_asyncio.cpython-311.pyc,,
|
||||
anyio/_backends/__pycache__/_trio.cpython-311.pyc,,
|
||||
anyio/_backends/_asyncio.py,sha256=AT1oaTfCE-9YFxooMlvld2yDqY5U2A-ANMcBDh9eRfI,93455
|
||||
anyio/_backends/_trio.py,sha256=HVfDqRGQ7Xj3JfTcYdgzmC7pZEplqU4NOO5kxNNSZnk,40429
|
||||
anyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
anyio/_core/__pycache__/__init__.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_asyncio_selector_thread.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_eventloop.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_exceptions.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_fileio.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_resources.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_signals.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_sockets.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_streams.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_subprocesses.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_synchronization.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_tasks.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_tempfile.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_testing.cpython-311.pyc,,
|
||||
anyio/_core/__pycache__/_typedattr.cpython-311.pyc,,
|
||||
anyio/_core/_asyncio_selector_thread.py,sha256=2PdxFM3cs02Kp6BSppbvmRT7q7asreTW5FgBxEsflBo,5626
|
||||
anyio/_core/_eventloop.py,sha256=t_tAwBFPjF8jrZGjlJ6bbYy6KA3bjsbZxV9mvh9t1i0,4695
|
||||
anyio/_core/_exceptions.py,sha256=RlPRlwastdmfDPoskdXNO6SI8_l3fclA2wtW6cokU9I,3503
|
||||
anyio/_core/_fileio.py,sha256=qFZhkLIz0cGXluvih_vcPUTucgq8UFVgsTCtYbijZIg,23340
|
||||
anyio/_core/_resources.py,sha256=NbmU5O5UX3xEyACnkmYX28Fmwdl-f-ny0tHym26e0w0,435
|
||||
anyio/_core/_signals.py,sha256=vulT1M1xdLYtAR-eY5TamIgaf1WTlOwOrMGwswlTTr8,905
|
||||
anyio/_core/_sockets.py,sha256=5Okc_UThGDEN9KCnsIhqWPRHBNuSy6b4NmG1i51TVF4,27150
|
||||
anyio/_core/_streams.py,sha256=OnaKgoDD-FcMSwLvkoAUGP51sG2ZdRvMpxt9q2w1gYA,1804
|
||||
anyio/_core/_subprocesses.py,sha256=EXm5igL7dj55iYkPlbYVAqtbqxJxjU-6OndSTIx9SRg,8047
|
||||
anyio/_core/_synchronization.py,sha256=DwUh8Tl6cG_UMVC_GyzPoC_U9BpfDfjMl9SINSxcZN4,20320
|
||||
anyio/_core/_tasks.py,sha256=f3CuWwo06cCZ6jaOv-JHFKWkgpgf2cvaF25Oh4augMA,4757
|
||||
anyio/_core/_tempfile.py,sha256=s-_ucacXbxBH5Bo5eo65lN0lPwZQd5B8yNN_9nARpCM,19696
|
||||
anyio/_core/_testing.py,sha256=YUGwA5cgFFbUTv4WFd7cv_BSVr4ryTtPp8owQA3JdWE,2118
|
||||
anyio/_core/_typedattr.py,sha256=P4ozZikn3-DbpoYcvyghS_FOYAgbmUxeoU8-L_07pZM,2508
|
||||
anyio/abc/__init__.py,sha256=c2OQbTCS_fQowviMXanLPh8m29ccwkXmpDr7uyNZYOo,2652
|
||||
anyio/abc/__pycache__/__init__.cpython-311.pyc,,
|
||||
anyio/abc/__pycache__/_eventloop.cpython-311.pyc,,
|
||||
anyio/abc/__pycache__/_resources.cpython-311.pyc,,
|
||||
anyio/abc/__pycache__/_sockets.cpython-311.pyc,,
|
||||
anyio/abc/__pycache__/_streams.cpython-311.pyc,,
|
||||
anyio/abc/__pycache__/_subprocesses.cpython-311.pyc,,
|
||||
anyio/abc/__pycache__/_tasks.cpython-311.pyc,,
|
||||
anyio/abc/__pycache__/_testing.cpython-311.pyc,,
|
||||
anyio/abc/_eventloop.py,sha256=UmL8DZCvQTgxzmyBZcGm9kWj9VQY8BMWueLh5S8yWN4,9682
|
||||
anyio/abc/_resources.py,sha256=DrYvkNN1hH6Uvv5_5uKySvDsnknGVDe8FCKfko0VtN8,783
|
||||
anyio/abc/_sockets.py,sha256=KhWtJxan8jpBXKwPaFeQzI4iRXdFaOIn0HXtDZnaO7U,6262
|
||||
anyio/abc/_streams.py,sha256=He_JpkAW2g5veOzcUq0XsRC2nId_i35L-d8cs7Uj1ZQ,6598
|
||||
anyio/abc/_subprocesses.py,sha256=cumAPJTktOQtw63IqG0lDpyZqu_l1EElvQHMiwJgL08,2067
|
||||
anyio/abc/_tasks.py,sha256=yJWbMwowvqjlAX4oJ3l9Is1w-zwynr2lX1Z02AWJqsY,3080
|
||||
anyio/abc/_testing.py,sha256=tBJUzkSfOXJw23fe8qSJ03kJlShOYjjaEyFB6k6MYT8,1821
|
||||
anyio/from_thread.py,sha256=MbXHZpgM9wgsRkbGhMNMomEGYj7Y_QYq6a5BZ3c5Ev8,17478
|
||||
anyio/lowlevel.py,sha256=nkgmW--SdxGVp0cmLUYazjkigveRm5HY7-gW8Bpp9oY,4169
|
||||
anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
anyio/pytest_plugin.py,sha256=qXNwk9Pa7hPQKWocgLl9qijqKGMkGzdH2wJa-jPkGUM,9375
|
||||
anyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
anyio/streams/__pycache__/__init__.cpython-311.pyc,,
|
||||
anyio/streams/__pycache__/buffered.cpython-311.pyc,,
|
||||
anyio/streams/__pycache__/file.cpython-311.pyc,,
|
||||
anyio/streams/__pycache__/memory.cpython-311.pyc,,
|
||||
anyio/streams/__pycache__/stapled.cpython-311.pyc,,
|
||||
anyio/streams/__pycache__/text.cpython-311.pyc,,
|
||||
anyio/streams/__pycache__/tls.cpython-311.pyc,,
|
||||
anyio/streams/buffered.py,sha256=UCldKC168YuLvT7n3HtNPnQ2iWAMSTYQWbZvzLwMwkM,4500
|
||||
anyio/streams/file.py,sha256=6uoTNb5KbMoj-6gS3_xrrL8uZN8Q4iIvOS1WtGyFfKw,4383
|
||||
anyio/streams/memory.py,sha256=o1OVVx0OooteTTe2GytJreum93Ucuw5s4cAsr3X0-Ag,10560
|
||||
anyio/streams/stapled.py,sha256=U09pCrmOw9kkNhe6tKopsm1QIMT1lFTFvtb-A7SIe4k,4302
|
||||
anyio/streams/text.py,sha256=6x8w8xlfCZKTUWQoJiMPoMhSSJFUBRKgoBNSBtbd9yg,5094
|
||||
anyio/streams/tls.py,sha256=HxzpVmUgo8SUSIBass_lvef1pAI1uRSrnysM3iEGzl4,13199
|
||||
anyio/to_interpreter.py,sha256=UhuNCIucCRN7ZtyJg35Mlamzs1JpgDvK4xnL4TDWrAo,6527
|
||||
anyio/to_process.py,sha256=ZvruelRM-HNmqDaql4sdNODg2QD_uSlwSCxnV4OhsfQ,9595
|
||||
anyio/to_thread.py,sha256=WM2JQ2MbVsd5D5CM08bQiTwzZIvpsGjfH1Fy247KoDQ,2396
|
||||
@@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: setuptools (76.0.0)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
[pytest11]
|
||||
anyio = anyio.pytest_plugin
|
||||
@@ -0,0 +1 @@
|
||||
anyio
|
||||
@@ -0,0 +1,85 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ._core._eventloop import current_time as current_time
|
||||
from ._core._eventloop import get_all_backends as get_all_backends
|
||||
from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
|
||||
from ._core._eventloop import run as run
|
||||
from ._core._eventloop import sleep as sleep
|
||||
from ._core._eventloop import sleep_forever as sleep_forever
|
||||
from ._core._eventloop import sleep_until as sleep_until
|
||||
from ._core._exceptions import BrokenResourceError as BrokenResourceError
|
||||
from ._core._exceptions import BrokenWorkerIntepreter as BrokenWorkerIntepreter
|
||||
from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
|
||||
from ._core._exceptions import BusyResourceError as BusyResourceError
|
||||
from ._core._exceptions import ClosedResourceError as ClosedResourceError
|
||||
from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
|
||||
from ._core._exceptions import EndOfStream as EndOfStream
|
||||
from ._core._exceptions import IncompleteRead as IncompleteRead
|
||||
from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
|
||||
from ._core._exceptions import WouldBlock as WouldBlock
|
||||
from ._core._fileio import AsyncFile as AsyncFile
|
||||
from ._core._fileio import Path as Path
|
||||
from ._core._fileio import open_file as open_file
|
||||
from ._core._fileio import wrap_file as wrap_file
|
||||
from ._core._resources import aclose_forcefully as aclose_forcefully
|
||||
from ._core._signals import open_signal_receiver as open_signal_receiver
|
||||
from ._core._sockets import connect_tcp as connect_tcp
|
||||
from ._core._sockets import connect_unix as connect_unix
|
||||
from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
|
||||
from ._core._sockets import (
|
||||
create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
|
||||
)
|
||||
from ._core._sockets import create_tcp_listener as create_tcp_listener
|
||||
from ._core._sockets import create_udp_socket as create_udp_socket
|
||||
from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
|
||||
from ._core._sockets import create_unix_listener as create_unix_listener
|
||||
from ._core._sockets import getaddrinfo as getaddrinfo
|
||||
from ._core._sockets import getnameinfo as getnameinfo
|
||||
from ._core._sockets import wait_readable as wait_readable
|
||||
from ._core._sockets import wait_socket_readable as wait_socket_readable
|
||||
from ._core._sockets import wait_socket_writable as wait_socket_writable
|
||||
from ._core._sockets import wait_writable as wait_writable
|
||||
from ._core._streams import create_memory_object_stream as create_memory_object_stream
|
||||
from ._core._subprocesses import open_process as open_process
|
||||
from ._core._subprocesses import run_process as run_process
|
||||
from ._core._synchronization import CapacityLimiter as CapacityLimiter
|
||||
from ._core._synchronization import (
|
||||
CapacityLimiterStatistics as CapacityLimiterStatistics,
|
||||
)
|
||||
from ._core._synchronization import Condition as Condition
|
||||
from ._core._synchronization import ConditionStatistics as ConditionStatistics
|
||||
from ._core._synchronization import Event as Event
|
||||
from ._core._synchronization import EventStatistics as EventStatistics
|
||||
from ._core._synchronization import Lock as Lock
|
||||
from ._core._synchronization import LockStatistics as LockStatistics
|
||||
from ._core._synchronization import ResourceGuard as ResourceGuard
|
||||
from ._core._synchronization import Semaphore as Semaphore
|
||||
from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
|
||||
from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
|
||||
from ._core._tasks import CancelScope as CancelScope
|
||||
from ._core._tasks import create_task_group as create_task_group
|
||||
from ._core._tasks import current_effective_deadline as current_effective_deadline
|
||||
from ._core._tasks import fail_after as fail_after
|
||||
from ._core._tasks import move_on_after as move_on_after
|
||||
from ._core._tempfile import NamedTemporaryFile as NamedTemporaryFile
|
||||
from ._core._tempfile import SpooledTemporaryFile as SpooledTemporaryFile
|
||||
from ._core._tempfile import TemporaryDirectory as TemporaryDirectory
|
||||
from ._core._tempfile import TemporaryFile as TemporaryFile
|
||||
from ._core._tempfile import gettempdir as gettempdir
|
||||
from ._core._tempfile import gettempdirb as gettempdirb
|
||||
from ._core._tempfile import mkdtemp as mkdtemp
|
||||
from ._core._tempfile import mkstemp as mkstemp
|
||||
from ._core._testing import TaskInfo as TaskInfo
|
||||
from ._core._testing import get_current_task as get_current_task
|
||||
from ._core._testing import get_running_tasks as get_running_tasks
|
||||
from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
|
||||
from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
|
||||
from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
|
||||
from ._core._typedattr import typed_attribute as typed_attribute
|
||||
|
||||
# Re-export imports so they look like they live directly in this package
|
||||
for __obj in list(locals().values()):
    if getattr(__obj, "__module__", "").startswith("anyio."):
        # Rebrand the imported object as belonging to the top-level package
        __obj.__module__ = __name__

del __obj
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,167 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import socket
|
||||
import threading
|
||||
from collections.abc import Callable
|
||||
from selectors import EVENT_READ, EVENT_WRITE, DefaultSelector
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from _typeshed import FileDescriptorLike
|
||||
|
||||
_selector_lock = threading.Lock()
|
||||
_selector: Selector | None = None
|
||||
|
||||
|
||||
class Selector:
    """
    Runs a ``selectors.DefaultSelector`` on a dedicated background thread and
    schedules readiness callbacks back onto asyncio event loops with
    ``call_soon_threadsafe()``.

    A non-blocking socket pair acts as a self-pipe: writing a byte to it wakes
    the blocking ``select()`` call whenever registrations change or shutdown is
    requested.
    """

    def __init__(self) -> None:
        self._thread = threading.Thread(target=self.run, name="AnyIO socket selector")
        self._selector = DefaultSelector()
        # Wakeup channel: other threads write to _send; the selector thread
        # watches _receive
        self._send, self._receive = socket.socketpair()
        self._send.setblocking(False)
        self._receive.setblocking(False)
        # This somewhat reduces the amount of memory wasted queueing up data
        # for wakeups. With these settings, maximum number of 1-byte sends
        # before getting BlockingIOError:
        # Linux 4.8: 6
        # macOS (darwin 15.5): 1
        # Windows 10: 525347
        # Windows you're weird. (And on Windows setting SNDBUF to 0 makes send
        # blocking, even on non-blocking sockets, so don't do that.)
        self._receive.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1)
        self._send.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
        # On Windows this is a TCP socket so this might matter. On other
        # platforms this fails b/c AF_UNIX sockets aren't actually TCP.
        try:
            self._send.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        except OSError:
            pass

        self._selector.register(self._receive, EVENT_READ)
        self._closed = False

    def start(self) -> None:
        """Start the selector thread and arrange for shutdown at interpreter exit."""
        self._thread.start()
        threading._register_atexit(self._stop)  # type: ignore[attr-defined]

    def _stop(self) -> None:
        # Shut down the selector thread: mark closed, wake the select() call so
        # the loop can observe the flag, then join the thread and release all
        # resources.
        global _selector
        self._closed = True
        self._notify_self()
        self._send.close()
        self._thread.join()
        self._selector.unregister(self._receive)
        self._receive.close()
        self._selector.close()
        _selector = None
        # NOTE(review): this assertion runs after close(); on CPython a closed
        # selector's get_map() returns None, making it trivially pass — confirm
        # whether it was meant to run before close()
        assert not self._selector.get_map(), (
            "selector still has registered file descriptors after shutdown"
        )

    def _notify_self(self) -> None:
        # Wake the selector thread by writing one byte to the wakeup socket.
        # BlockingIOError means the (tiny) buffer is already full, so a wakeup
        # is pending and the byte can be safely dropped.
        try:
            self._send.send(b"\x00")
        except BlockingIOError:
            pass

    def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
        """
        Register *callback* to be scheduled on the current event loop when *fd*
        becomes readable.

        :raises ValueError: if *fd* is already registered for reading
        """
        loop = asyncio.get_running_loop()
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            # First registration for this fd; key.data maps event -> (loop, callback)
            self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)})
        else:
            if EVENT_READ in key.data:
                raise ValueError(
                    "this file descriptor is already registered for reading"
                )

            key.data[EVENT_READ] = loop, callback
            self._selector.modify(fd, key.events | EVENT_READ, key.data)

        # Wake the selector thread so the new registration takes effect
        self._notify_self()

    def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
        """
        Register *callback* to be scheduled on the current event loop when *fd*
        becomes writable.

        :raises ValueError: if *fd* is already registered for writing
        """
        loop = asyncio.get_running_loop()
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            # First registration for this fd; key.data maps event -> (loop, callback)
            self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)})
        else:
            if EVENT_WRITE in key.data:
                raise ValueError(
                    "this file descriptor is already registered for writing"
                )

            key.data[EVENT_WRITE] = loop, callback
            self._selector.modify(fd, key.events | EVENT_WRITE, key.data)

        # Wake the selector thread so the new registration takes effect
        self._notify_self()

    def remove_reader(self, fd: FileDescriptorLike) -> bool:
        """
        Remove the read registration for *fd*.

        :return: ``True`` if *fd* was registered, ``False`` otherwise
        """
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False

        # XOR clears EVENT_READ from the mask (assumes it was set); keep the
        # registration if write interest remains, otherwise drop it entirely
        if new_events := key.events ^ EVENT_READ:
            del key.data[EVENT_READ]
            self._selector.modify(fd, new_events, key.data)
        else:
            self._selector.unregister(fd)

        return True

    def remove_writer(self, fd: FileDescriptorLike) -> bool:
        """
        Remove the write registration for *fd*.

        :return: ``True`` if *fd* was registered, ``False`` otherwise
        """
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False

        # XOR clears EVENT_WRITE from the mask (assumes it was set); keep the
        # registration if read interest remains, otherwise drop it entirely
        if new_events := key.events ^ EVENT_WRITE:
            del key.data[EVENT_WRITE]
            self._selector.modify(fd, new_events, key.data)
        else:
            self._selector.unregister(fd)

        return True

    def run(self) -> None:
        # Selector thread main loop: block in select(), then dispatch each
        # ready callback to its owning event loop. Callbacks are one-shot:
        # the registration is removed before scheduling.
        while not self._closed:
            for key, events in self._selector.select():
                if key.fileobj is self._receive:
                    # Drain the wakeup socket and re-check registrations/_closed
                    try:
                        while self._receive.recv(4096):
                            pass
                    except BlockingIOError:
                        pass

                    continue

                if events & EVENT_READ:
                    loop, callback = key.data[EVENT_READ]
                    self.remove_reader(key.fd)
                    try:
                        loop.call_soon_threadsafe(callback)
                    except RuntimeError:
                        pass  # the loop was already closed

                if events & EVENT_WRITE:
                    loop, callback = key.data[EVENT_WRITE]
                    self.remove_writer(key.fd)
                    try:
                        loop.call_soon_threadsafe(callback)
                    except RuntimeError:
                        pass  # the loop was already closed
|
||||
|
||||
|
||||
def get_selector() -> Selector:
    """Return the process-wide :class:`Selector`, creating and starting it on first use."""
    global _selector

    with _selector_lock:
        if _selector is None:
            _selector = Selector()
            _selector.start()

        return _selector
|
||||
@@ -0,0 +1,166 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import sys
|
||||
import threading
|
||||
from collections.abc import Awaitable, Callable, Generator
|
||||
from contextlib import contextmanager
|
||||
from importlib import import_module
|
||||
from typing import TYPE_CHECKING, Any, TypeVar
|
||||
|
||||
import sniffio
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ..abc import AsyncBackend
|
||||
|
||||
# This must be updated when new backends are introduced
|
||||
BACKENDS = "asyncio", "trio"
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
PosArgsT = TypeVarTuple("PosArgsT")
|
||||
|
||||
threadlocals = threading.local()
|
||||
loaded_backends: dict[str, type[AsyncBackend]] = {}
|
||||
|
||||
|
||||
def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
    *args: Unpack[PosArgsT],
    backend: str = "asyncio",
    backend_options: dict[str, Any] | None = None,
) -> T_Retval:
    """
    Run the given coroutine function in an asynchronous event loop.

    The current thread must not be already running an event loop.

    :param func: a coroutine function
    :param args: positional arguments to ``func``
    :param backend: name of the asynchronous event loop implementation – currently
        either ``asyncio`` or ``trio``
    :param backend_options: keyword arguments to call the backend ``run()``
        implementation with (documented :ref:`here <backend options>`)
    :return: the return value of the coroutine function
    :raises RuntimeError: if an asynchronous event loop is already running in this
        thread
    :raises LookupError: if the named backend is not found

    """
    # Refuse to start a nested event loop: if sniffio can detect an async
    # library, this thread is already running one
    try:
        asynclib_name = sniffio.current_async_library()
    except sniffio.AsyncLibraryNotFoundError:
        pass
    else:
        raise RuntimeError(f"Already running {asynclib_name} in this thread")

    # Translate a failed backend import into the documented LookupError
    try:
        async_backend = get_async_backend(backend)
    except ImportError as exc:
        raise LookupError(f"No such backend: {backend}") from exc

    token = None
    if sniffio.current_async_library_cvar.get(None) is None:
        # Since we're in control of the event loop, we can cache the name of the async
        # library
        token = sniffio.current_async_library_cvar.set(backend)

    try:
        backend_options = backend_options or {}
        return async_backend.run(func, args, {}, backend_options)
    finally:
        # Only reset the cvar if we were the ones who set it
        if token:
            sniffio.current_async_library_cvar.reset(token)
|
||||
|
||||
|
||||
async def sleep(delay: float) -> None:
    """
    Suspend the current task for the given number of seconds.

    :param delay: how long to sleep, in seconds

    """
    backend = get_async_backend()
    return await backend.sleep(delay)
|
||||
|
||||
|
||||
async def sleep_forever() -> None:
    """
    Suspend the current task indefinitely (until cancelled).

    Shorthand for ``sleep(math.inf)``.

    .. versionadded:: 3.1

    """
    return await sleep(math.inf)
|
||||
|
||||
|
||||
async def sleep_until(deadline: float) -> None:
    """
    Suspend the current task until the event loop clock reaches *deadline*.

    :param deadline: the absolute wake-up time, measured on the event loop's
        internal monotonic clock

    .. versionadded:: 3.1

    """
    # A deadline already in the past yields a zero-length sleep
    remaining = deadline - current_time()
    await sleep(max(remaining, 0))
|
||||
|
||||
|
||||
def current_time() -> float:
    """
    Read the running event loop's internal monotonic clock.

    :return: the clock value, in seconds

    """
    backend = get_async_backend()
    return backend.current_time()
|
||||
|
||||
|
||||
def get_all_backends() -> tuple[str, ...]:
    """Return the names of every built-in backend as a tuple."""
    return BACKENDS
|
||||
|
||||
|
||||
def get_cancelled_exc_class() -> type[BaseException]:
    """Return the exception class the current async library uses for cancellation."""
    backend = get_async_backend()
    return backend.cancelled_exception_class()
|
||||
|
||||
|
||||
#
|
||||
# Private API
|
||||
#
|
||||
|
||||
|
||||
@contextmanager
def claim_worker_thread(
    backend_class: type[AsyncBackend], token: object
) -> Generator[Any, None, None]:
    """
    Mark the current (worker) thread as owned by the given backend for the
    duration of the ``with`` block, clearing the markers afterwards.
    """
    threadlocals.current_async_backend = backend_class
    threadlocals.current_token = token
    try:
        yield
    finally:
        # Unmark the thread even if the body raised
        del threadlocals.current_token
        del threadlocals.current_async_backend
|
||||
|
||||
|
||||
def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:
    """
    Return the backend class for the named async library, importing it on first use.

    :param asynclib_name: the backend name; if ``None``, detect the currently
        running library via sniffio
    """
    if asynclib_name is None:
        asynclib_name = sniffio.current_async_library()

    # We use our own dict instead of sys.modules to get the already imported back-end
    # class because the appropriate modules in sys.modules could potentially be only
    # partially initialized
    backend_class = loaded_backends.get(asynclib_name)
    if backend_class is None:
        module = import_module(f"anyio._backends._{asynclib_name}")
        backend_class = module.backend_class
        loaded_backends[asynclib_name] = backend_class

    return backend_class
|
||||
@@ -0,0 +1,126 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from collections.abc import Generator
|
||||
from textwrap import dedent
|
||||
from typing import Any
|
||||
|
||||
if sys.version_info < (3, 11):
|
||||
from exceptiongroup import BaseExceptionGroup
|
||||
|
||||
|
||||
class BrokenResourceError(Exception):
    """
    Raised when a resource has been rendered unusable by circumstances beyond the
    caller's control (for example, a send stream whose peer has disconnected).
    """
|
||||
|
||||
|
||||
class BrokenWorkerProcess(Exception):
    """
    Raised by :meth:`~anyio.to_process.run_sync` when the worker process terminates
    abruptly or otherwise misbehaves.
    """
|
||||
|
||||
|
||||
class BrokenWorkerIntepreter(Exception):
    """
    Raised by :meth:`~anyio.to_interpreter.run_sync` if an unexpected exception is
    raised in the subinterpreter.
    """

    # NOTE: the class name misspells "Interpreter"; it is re-exported as part of
    # the public anyio API, so it cannot be renamed here.

    def __init__(self, excinfo: Any):
        # This was adapted from concurrent.futures.interpreter.ExecutionFailed
        # Prefer the pre-formatted message; otherwise synthesize one from the
        # exception type/message snapshot captured in the subinterpreter.
        # assumes excinfo has .formatted, .type and .msg attributes — TODO confirm
        # against to_interpreter's excinfo producer
        msg = excinfo.formatted
        if not msg:
            if excinfo.type and excinfo.msg:
                msg = f"{excinfo.type.__name__}: {excinfo.msg}"
            else:
                msg = excinfo.type.__name__ or excinfo.msg

        super().__init__(msg)
        # Keep the raw snapshot around for callers that want the details
        self.excinfo = excinfo

    def __str__(self) -> str:
        # Include the rendered subinterpreter traceback when available; fall
        # back to the plain message otherwise
        try:
            formatted = self.excinfo.errdisplay
        except Exception:
            return super().__str__()
        else:
            return dedent(
                f"""
                {super().__str__()}

                Uncaught in the interpreter:

                {formatted}
                """.strip()
            )
|
||||
|
||||
|
||||
class BusyResourceError(Exception):
    """
    Raised when two tasks attempt to read from or write to the same resource at the
    same time.
    """

    def __init__(self, action: str):
        message = f"Another task is already {action} this resource"
        super().__init__(message)
|
||||
|
||||
|
||||
class ClosedResourceError(Exception):
    """Raised on an attempt to use a resource that has already been closed."""
|
||||
|
||||
|
||||
class DelimiterNotFound(Exception):
    """
    Raised by
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` when
    the maximum number of bytes was read without the delimiter appearing.
    """

    def __init__(self, max_bytes: int) -> None:
        message = f"The delimiter was not found among the first {max_bytes} bytes"
        super().__init__(message)
|
||||
|
||||
|
||||
class EndOfStream(Exception):
    """
    Raised when a read is attempted on a stream that the other end has already
    closed.
    """
|
||||
|
||||
|
||||
class IncompleteRead(Exception):
    """
    Raised by
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` when
    the connection closes before the requested number of bytes could be read.
    """

    def __init__(self) -> None:
        message = "The stream was closed before the read operation could be completed"
        super().__init__(message)
|
||||
|
||||
|
||||
class TypedAttributeLookupError(LookupError):
    """
    Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the requested typed
    attribute is missing and no default value was supplied.
    """
|
||||
|
||||
|
||||
class WouldBlock(Exception):
    """Raised by the ``X_nowait`` variants when the blocking call ``X()`` would block."""
|
||||
|
||||
|
||||
def iterate_exceptions(
|
||||
exception: BaseException,
|
||||
) -> Generator[BaseException, None, None]:
|
||||
if isinstance(exception, BaseExceptionGroup):
|
||||
for exc in exception.exceptions:
|
||||
yield from iterate_exceptions(exc)
|
||||
else:
|
||||
yield exception
|
||||
@@ -0,0 +1,742 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import pathlib
|
||||
import sys
|
||||
from collections.abc import (
|
||||
AsyncIterator,
|
||||
Callable,
|
||||
Iterable,
|
||||
Iterator,
|
||||
Sequence,
|
||||
)
|
||||
from dataclasses import dataclass
|
||||
from functools import partial
|
||||
from os import PathLike
|
||||
from typing import (
|
||||
IO,
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
AnyStr,
|
||||
ClassVar,
|
||||
Final,
|
||||
Generic,
|
||||
overload,
|
||||
)
|
||||
|
||||
from .. import to_thread
|
||||
from ..abc import AsyncResource
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from types import ModuleType
|
||||
|
||||
from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
|
||||
else:
|
||||
ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
|
||||
|
||||
|
||||
class AsyncFile(AsyncResource, Generic[AnyStr]):
    """
    An asynchronous file object.

    This class wraps a standard file object and provides async friendly versions of the
    following blocking methods (where available on the original file object):

    * read
    * read1
    * readline
    * readlines
    * readinto
    * readinto1
    * write
    * writelines
    * truncate
    * seek
    * tell
    * flush

    All other methods are directly passed through.

    This class supports the asynchronous context manager protocol which closes the
    underlying file at the end of the context block.

    This class also supports asynchronous iteration::

        async with await open_file(...) as f:
            async for line in f:
                print(line)
    """

    def __init__(self, fp: IO[AnyStr]) -> None:
        self._fp: Any = fp

    def __getattr__(self, name: str) -> object:
        # Any attribute not defined here falls through to the wrapped file.
        return getattr(self._fp, name)

    @property
    def wrapped(self) -> IO[AnyStr]:
        """The wrapped file object."""
        return self._fp

    async def __aiter__(self) -> AsyncIterator[AnyStr]:
        # Yield lines until readline() returns an empty str/bytes (EOF).
        while True:
            line = await self.readline()
            if line:
                yield line
            else:
                break

    async def aclose(self) -> None:
        return await to_thread.run_sync(self._fp.close)

    async def read(self, size: int = -1) -> AnyStr:
        return await to_thread.run_sync(self._fp.read, size)

    async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
        return await to_thread.run_sync(self._fp.read1, size)

    async def readline(self) -> AnyStr:
        return await to_thread.run_sync(self._fp.readline)

    async def readlines(self) -> list[AnyStr]:
        return await to_thread.run_sync(self._fp.readlines)

    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        return await to_thread.run_sync(self._fp.readinto, b)

    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        return await to_thread.run_sync(self._fp.readinto1, b)

    @overload
    async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ...

    @overload
    async def write(self: AsyncFile[str], b: str) -> int: ...

    async def write(self, b: ReadableBuffer | str) -> int:
        return await to_thread.run_sync(self._fp.write, b)

    @overload
    async def writelines(
        self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
    ) -> None: ...

    @overload
    async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ...

    async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
        return await to_thread.run_sync(self._fp.writelines, lines)

    async def truncate(self, size: int | None = None) -> int:
        return await to_thread.run_sync(self._fp.truncate, size)

    async def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
        # Fixed annotation: "whence" was annotated as "int | None", but file
        # objects require an int here (passing None raises TypeError at
        # runtime); the default remains os.SEEK_SET.
        return await to_thread.run_sync(self._fp.seek, offset, whence)

    async def tell(self) -> int:
        return await to_thread.run_sync(self._fp.tell)

    async def flush(self) -> None:
        return await to_thread.run_sync(self._fp.flush)
|
||||
|
||||
|
||||
@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenBinaryMode,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]: ...


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenTextMode = ...,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]: ...


async def open_file(
    file: str | PathLike[str] | int,
    mode: str = "r",
    buffering: int = -1,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
    closefd: bool = True,
    opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
    """
    Open a file asynchronously.

    The arguments are exactly the same as for the builtin :func:`open`.

    :return: an asynchronous file object

    """
    # The blocking open() call is delegated to a worker thread, then the
    # resulting file object is wrapped for async use.
    fp = await to_thread.run_sync(
        open, file, mode, buffering, encoding, errors, newline, closefd, opener
    )
    return AsyncFile(fp)
|
||||
|
||||
|
||||
def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
    """
    Adapt an already-open file object into an :class:`AsyncFile`.

    :param file: the file-like object to wrap
    :return: the wrapped asynchronous file object

    """
    wrapped: AsyncFile[AnyStr] = AsyncFile(file)
    return wrapped
|
||||
|
||||
|
||||
@dataclass(eq=False)
class _PathIterator(AsyncIterator["Path"]):
    """Async adapter that pulls from a blocking path iterator in a worker thread."""

    iterator: Iterator[PathLike[str]]

    async def __anext__(self) -> Path:
        # ``None`` doubles as the end-of-iteration sentinel so that
        # StopIteration never has to cross the thread boundary.
        candidate = await to_thread.run_sync(
            next, self.iterator, None, abandon_on_cancel=True
        )
        if candidate is None:
            raise StopAsyncIteration from None

        return Path(candidate)
|
||||
|
||||
|
||||
class Path:
    """
    An asynchronous version of :class:`pathlib.Path`.

    This class cannot be substituted for :class:`pathlib.Path` or
    :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
    interface.

    It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for
    the deprecated :meth:`~pathlib.Path.link_to` method.

    Some methods may be unavailable or have limited functionality, based on the Python
    version:

    * :meth:`~pathlib.Path.copy` (available on Python 3.14 or later)
    * :meth:`~pathlib.Path.copy_into` (available on Python 3.14 or later)
    * :meth:`~pathlib.Path.from_uri` (available on Python 3.13 or later)
    * :meth:`~pathlib.PurePath.full_match` (available on Python 3.13 or later)
    * :attr:`~pathlib.Path.info` (available on Python 3.14 or later)
    * :meth:`~pathlib.Path.is_junction` (available on Python 3.12 or later)
    * :meth:`~pathlib.PurePath.match` (the ``case_sensitive`` parameter is only
      available on Python 3.13 or later)
    * :meth:`~pathlib.Path.move` (available on Python 3.14 or later)
    * :meth:`~pathlib.Path.move_into` (available on Python 3.14 or later)
    * :meth:`~pathlib.PurePath.relative_to` (the ``walk_up`` parameter is only available
      on Python 3.12 or later)
    * :meth:`~pathlib.Path.walk` (available on Python 3.12 or later)

    Any methods that do disk I/O need to be awaited on. These methods are:

    * :meth:`~pathlib.Path.absolute`
    * :meth:`~pathlib.Path.chmod`
    * :meth:`~pathlib.Path.cwd`
    * :meth:`~pathlib.Path.exists`
    * :meth:`~pathlib.Path.expanduser`
    * :meth:`~pathlib.Path.group`
    * :meth:`~pathlib.Path.hardlink_to`
    * :meth:`~pathlib.Path.home`
    * :meth:`~pathlib.Path.is_block_device`
    * :meth:`~pathlib.Path.is_char_device`
    * :meth:`~pathlib.Path.is_dir`
    * :meth:`~pathlib.Path.is_fifo`
    * :meth:`~pathlib.Path.is_file`
    * :meth:`~pathlib.Path.is_junction`
    * :meth:`~pathlib.Path.is_mount`
    * :meth:`~pathlib.Path.is_socket`
    * :meth:`~pathlib.Path.is_symlink`
    * :meth:`~pathlib.Path.lchmod`
    * :meth:`~pathlib.Path.lstat`
    * :meth:`~pathlib.Path.mkdir`
    * :meth:`~pathlib.Path.open`
    * :meth:`~pathlib.Path.owner`
    * :meth:`~pathlib.Path.read_bytes`
    * :meth:`~pathlib.Path.read_text`
    * :meth:`~pathlib.Path.readlink`
    * :meth:`~pathlib.Path.rename`
    * :meth:`~pathlib.Path.replace`
    * :meth:`~pathlib.Path.resolve`
    * :meth:`~pathlib.Path.rmdir`
    * :meth:`~pathlib.Path.samefile`
    * :meth:`~pathlib.Path.stat`
    * :meth:`~pathlib.Path.symlink_to`
    * :meth:`~pathlib.Path.touch`
    * :meth:`~pathlib.Path.unlink`
    * :meth:`~pathlib.Path.walk`
    * :meth:`~pathlib.Path.write_bytes`
    * :meth:`~pathlib.Path.write_text`

    Additionally, the following methods return an async iterator yielding
    :class:`~.Path` objects:

    * :meth:`~pathlib.Path.glob`
    * :meth:`~pathlib.Path.iterdir`
    * :meth:`~pathlib.Path.rglob`
    """

    # "_path" holds the wrapped pathlib.Path; "__weakref__" keeps instances
    # weak-referenceable despite __slots__.
    __slots__ = "_path", "__weakref__"

    __weakref__: Any

    def __init__(self, *args: str | PathLike[str]) -> None:
        # The wrapped path never changes after construction (hence Final).
        self._path: Final[pathlib.Path] = pathlib.Path(*args)

    def __fspath__(self) -> str:
        return self._path.__fspath__()

    def __str__(self) -> str:
        return self._path.__str__()

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.as_posix()!r})"

    def __bytes__(self) -> bytes:
        return self._path.__bytes__()

    def __hash__(self) -> int:
        return self._path.__hash__()

    def __eq__(self, other: object) -> bool:
        # Comparison operators unwrap another anyio Path first, then delegate
        # to the underlying pathlib.Path.
        target = other._path if isinstance(other, Path) else other
        return self._path.__eq__(target)

    def __lt__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__lt__(target)

    def __le__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__le__(target)

    def __gt__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__gt__(target)

    def __ge__(self, other: pathlib.PurePath | Path) -> bool:
        target = other._path if isinstance(other, Path) else other
        return self._path.__ge__(target)

    def __truediv__(self, other: str | PathLike[str]) -> Path:
        return Path(self._path / other)

    def __rtruediv__(self, other: str | PathLike[str]) -> Path:
        return Path(other) / self

    @property
    def parts(self) -> tuple[str, ...]:
        return self._path.parts

    @property
    def drive(self) -> str:
        return self._path.drive

    @property
    def root(self) -> str:
        return self._path.root

    @property
    def anchor(self) -> str:
        return self._path.anchor

    @property
    def parents(self) -> Sequence[Path]:
        return tuple(Path(p) for p in self._path.parents)

    @property
    def parent(self) -> Path:
        return Path(self._path.parent)

    @property
    def name(self) -> str:
        return self._path.name

    @property
    def suffix(self) -> str:
        return self._path.suffix

    @property
    def suffixes(self) -> list[str]:
        return self._path.suffixes

    @property
    def stem(self) -> str:
        return self._path.stem

    async def absolute(self) -> Path:
        path = await to_thread.run_sync(self._path.absolute)
        return Path(path)

    def as_posix(self) -> str:
        return self._path.as_posix()

    def as_uri(self) -> str:
        return self._path.as_uri()

    # Methods/attributes that only exist on newer pathlib versions are defined
    # conditionally so that the class mirrors the running interpreter.
    if sys.version_info >= (3, 13):
        parser: ClassVar[ModuleType] = pathlib.Path.parser

        @classmethod
        def from_uri(cls, uri: str) -> Path:
            return Path(pathlib.Path.from_uri(uri))

        def full_match(
            self, path_pattern: str, *, case_sensitive: bool | None = None
        ) -> bool:
            return self._path.full_match(path_pattern, case_sensitive=case_sensitive)

        def match(
            self, path_pattern: str, *, case_sensitive: bool | None = None
        ) -> bool:
            return self._path.match(path_pattern, case_sensitive=case_sensitive)
    else:

        def match(self, path_pattern: str) -> bool:
            return self._path.match(path_pattern)

    if sys.version_info >= (3, 14):

        @property
        def info(self) -> Any:  # TODO: add return type annotation when Typeshed gets it
            return self._path.info

        async def copy(
            self,
            target: str | os.PathLike[str],
            *,
            follow_symlinks: bool = True,
            dirs_exist_ok: bool = False,
            preserve_metadata: bool = False,
        ) -> Path:
            # partial() binds the keyword-only options, since run_sync only
            # forwards positional arguments.
            func = partial(
                self._path.copy,
                follow_symlinks=follow_symlinks,
                dirs_exist_ok=dirs_exist_ok,
                preserve_metadata=preserve_metadata,
            )
            return Path(await to_thread.run_sync(func, target))

        async def copy_into(
            self,
            target_dir: str | os.PathLike[str],
            *,
            follow_symlinks: bool = True,
            dirs_exist_ok: bool = False,
            preserve_metadata: bool = False,
        ) -> Path:
            func = partial(
                self._path.copy_into,
                follow_symlinks=follow_symlinks,
                dirs_exist_ok=dirs_exist_ok,
                preserve_metadata=preserve_metadata,
            )
            return Path(await to_thread.run_sync(func, target_dir))

        async def move(self, target: str | os.PathLike[str]) -> Path:
            # Upstream does not handle anyio.Path properly as a PathLike
            target = pathlib.Path(target)
            return Path(await to_thread.run_sync(self._path.move, target))

        async def move_into(
            self,
            target_dir: str | os.PathLike[str],
        ) -> Path:
            return Path(await to_thread.run_sync(self._path.move_into, target_dir))

    def is_relative_to(self, other: str | PathLike[str]) -> bool:
        # Implemented in terms of relative_to(): a ValueError means "not relative".
        try:
            self.relative_to(other)
            return True
        except ValueError:
            return False

    async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
        func = partial(os.chmod, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, mode)

    @classmethod
    async def cwd(cls) -> Path:
        path = await to_thread.run_sync(pathlib.Path.cwd)
        return cls(path)

    async def exists(self) -> bool:
        # abandon_on_cancel=True lets cancellation abandon the worker thread;
        # presumably safe for these read-only filesystem queries -- upstream
        # convention, used throughout this class.
        return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)

    async def expanduser(self) -> Path:
        return Path(
            await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
        )

    def glob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.glob(pattern)
        return _PathIterator(gen)

    async def group(self) -> str:
        return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)

    async def hardlink_to(
        self, target: str | bytes | PathLike[str] | PathLike[bytes]
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        # os.link(src, dst): the new link is created at *self*, pointing at *target*.
        await to_thread.run_sync(os.link, target, self)

    @classmethod
    async def home(cls) -> Path:
        home_path = await to_thread.run_sync(pathlib.Path.home)
        return cls(home_path)

    def is_absolute(self) -> bool:
        return self._path.is_absolute()

    async def is_block_device(self) -> bool:
        return await to_thread.run_sync(
            self._path.is_block_device, abandon_on_cancel=True
        )

    async def is_char_device(self) -> bool:
        return await to_thread.run_sync(
            self._path.is_char_device, abandon_on_cancel=True
        )

    async def is_dir(self) -> bool:
        return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)

    async def is_fifo(self) -> bool:
        return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)

    async def is_file(self) -> bool:
        return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)

    if sys.version_info >= (3, 12):

        async def is_junction(self) -> bool:
            return await to_thread.run_sync(self._path.is_junction)

    async def is_mount(self) -> bool:
        return await to_thread.run_sync(
            os.path.ismount, self._path, abandon_on_cancel=True
        )

    def is_reserved(self) -> bool:
        return self._path.is_reserved()

    async def is_socket(self) -> bool:
        return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)

    async def is_symlink(self) -> bool:
        return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)

    async def iterdir(self) -> AsyncIterator[Path]:
        # NOTE(review): on Python >= 3.13, iterdir() itself is run in a worker
        # thread -- presumably because it performs eager filesystem access
        # there; confirm against upstream pathlib changes.
        gen = (
            self._path.iterdir()
            if sys.version_info < (3, 13)
            else await to_thread.run_sync(self._path.iterdir, abandon_on_cancel=True)
        )
        async for path in _PathIterator(gen):
            yield path

    def joinpath(self, *args: str | PathLike[str]) -> Path:
        return Path(self._path.joinpath(*args))

    async def lchmod(self, mode: int) -> None:
        await to_thread.run_sync(self._path.lchmod, mode)

    async def lstat(self) -> os.stat_result:
        return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)

    async def mkdir(
        self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
    ) -> None:
        await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)

    @overload
    async def open(
        self,
        mode: OpenBinaryMode,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[bytes]: ...

    @overload
    async def open(
        self,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[str]: ...

    async def open(
        self,
        mode: str = "r",
        buffering: int = -1,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> AsyncFile[Any]:
        # The blocking open happens in a worker thread; the result is wrapped
        # in AsyncFile for async access.
        fp = await to_thread.run_sync(
            self._path.open, mode, buffering, encoding, errors, newline
        )
        return AsyncFile(fp)

    async def owner(self) -> str:
        return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)

    async def read_bytes(self) -> bytes:
        return await to_thread.run_sync(self._path.read_bytes)

    async def read_text(
        self, encoding: str | None = None, errors: str | None = None
    ) -> str:
        return await to_thread.run_sync(self._path.read_text, encoding, errors)

    if sys.version_info >= (3, 12):

        def relative_to(
            self, *other: str | PathLike[str], walk_up: bool = False
        ) -> Path:
            return Path(self._path.relative_to(*other, walk_up=walk_up))

    else:

        def relative_to(self, *other: str | PathLike[str]) -> Path:
            return Path(self._path.relative_to(*other))

    async def readlink(self) -> Path:
        target = await to_thread.run_sync(os.readlink, self._path)
        return Path(target)

    async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.rename, target)
        return Path(target)

    async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.replace, target)
        return Path(target)

    async def resolve(self, strict: bool = False) -> Path:
        func = partial(self._path.resolve, strict=strict)
        return Path(await to_thread.run_sync(func, abandon_on_cancel=True))

    def rglob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.rglob(pattern)
        return _PathIterator(gen)

    async def rmdir(self) -> None:
        await to_thread.run_sync(self._path.rmdir)

    async def samefile(self, other_path: str | PathLike[str]) -> bool:
        if isinstance(other_path, Path):
            other_path = other_path._path

        return await to_thread.run_sync(
            self._path.samefile, other_path, abandon_on_cancel=True
        )

    async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
        func = partial(os.stat, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)

    async def symlink_to(
        self,
        target: str | bytes | PathLike[str] | PathLike[bytes],
        target_is_directory: bool = False,
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)

    async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
        await to_thread.run_sync(self._path.touch, mode, exist_ok)

    async def unlink(self, missing_ok: bool = False) -> None:
        # missing_ok is emulated here rather than forwarded to pathlib.
        try:
            await to_thread.run_sync(self._path.unlink)
        except FileNotFoundError:
            if not missing_ok:
                raise

    if sys.version_info >= (3, 12):

        async def walk(
            self,
            top_down: bool = True,
            on_error: Callable[[OSError], object] | None = None,
            follow_symlinks: bool = False,
        ) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
            def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
                # Convert StopIteration into None so it never propagates
                # across the worker-thread boundary.
                try:
                    return next(gen)
                except StopIteration:
                    return None

            gen = self._path.walk(top_down, on_error, follow_symlinks)
            while True:
                value = await to_thread.run_sync(get_next_value)
                if value is None:
                    return

                root, dirs, paths = value
                yield Path(root), dirs, paths

    def with_name(self, name: str) -> Path:
        return Path(self._path.with_name(name))

    def with_stem(self, stem: str) -> Path:
        return Path(self._path.with_name(stem + self._path.suffix))

    def with_suffix(self, suffix: str) -> Path:
        return Path(self._path.with_suffix(suffix))

    def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
        # Deliberately ignores self: builds a fresh path from the given
        # segments, matching the pathlib with_segments() protocol.
        return Path(*pathsegments)

    async def write_bytes(self, data: bytes) -> int:
        return await to_thread.run_sync(self._path.write_bytes, data)

    async def write_text(
        self,
        data: str,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> int:
        # Path.write_text() does not support the "newline" parameter before Python 3.10
        def sync_write_text() -> int:
            with self._path.open(
                "w", encoding=encoding, errors=errors, newline=newline
            ) as fp:
                return fp.write(data)

        return await to_thread.run_sync(sync_write_text)
|
||||
|
||||
|
||||
# Register Path as a virtual subclass of os.PathLike so that
# isinstance(Path(...), os.PathLike) returns True.
PathLike.register(Path)
|
||||
@@ -0,0 +1,18 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ..abc import AsyncResource
|
||||
from ._tasks import CancelScope
|
||||
|
||||
|
||||
async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource without waiting on anything.

    The resource's ``aclose()`` is invoked inside a cancel scope that is
    cancelled up front, so any blocking cleanup is skipped.

    :param resource: the resource to close

    """
    with CancelScope() as cancel_scope:
        cancel_scope.cancel()
        await resource.aclose()
|
||||
@@ -0,0 +1,27 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import AsyncIterator
|
||||
from contextlib import AbstractContextManager
|
||||
from signal import Signals
|
||||
|
||||
from ._eventloop import get_async_backend
|
||||
|
||||
|
||||
def open_signal_receiver(
    *signals: Signals,
) -> AbstractContextManager[AsyncIterator[Signals]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields
        signal numbers

    .. warning:: Windows does not support signals natively so it is best to avoid
        relying on this in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for
        the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    # All platform-specific handling is delegated to the active async backend.
    return get_async_backend().open_signal_receiver(*signals)
|
||||
@@ -0,0 +1,792 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import errno
|
||||
import os
|
||||
import socket
|
||||
import ssl
|
||||
import stat
|
||||
import sys
|
||||
from collections.abc import Awaitable
|
||||
from ipaddress import IPv6Address, ip_address
|
||||
from os import PathLike, chmod
|
||||
from socket import AddressFamily, SocketKind
|
||||
from typing import TYPE_CHECKING, Any, Literal, cast, overload
|
||||
|
||||
from .. import to_thread
|
||||
from ..abc import (
|
||||
ConnectedUDPSocket,
|
||||
ConnectedUNIXDatagramSocket,
|
||||
IPAddressType,
|
||||
IPSockAddrType,
|
||||
SocketListener,
|
||||
SocketStream,
|
||||
UDPSocket,
|
||||
UNIXDatagramSocket,
|
||||
UNIXSocketStream,
|
||||
)
|
||||
from ..streams.stapled import MultiListener
|
||||
from ..streams.tls import TLSStream
|
||||
from ._eventloop import get_async_backend
|
||||
from ._resources import aclose_forcefully
|
||||
from ._synchronization import Event
|
||||
from ._tasks import create_task_group, move_on_after
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from _typeshed import FileDescriptorLike
|
||||
else:
|
||||
FileDescriptorLike = object
|
||||
|
||||
if sys.version_info < (3, 11):
|
||||
from exceptiongroup import ExceptionGroup
|
||||
|
||||
if sys.version_info < (3, 13):
|
||||
from typing_extensions import deprecated
|
||||
else:
|
||||
from warnings import deprecated
|
||||
|
||||
# Fallback for platforms whose socket module lacks IPPROTO_IPV6 (notably some
# Windows builds); 41 is the conventional value for that constant.
IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41)  # https://bugs.python.org/issue29515

# Address family aliases used in the socket API annotations below.
AnyIPAddressFamily = Literal[
    AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
|
||||
|
||||
|
||||
# tls_hostname given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# ssl_context given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=True
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[True],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=False
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[False],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...


# No TLS arguments
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...


async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = None,
    tls: bool = False,
    ssl_context: ssl.SSLContext | None = None,
    tls_standard_compatible: bool = True,
    tls_hostname: str | None = None,
    happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC
    6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
    each one is tried until one connection attempt succeeds. If the first attempt does
    not connect within ``happy_eyeballs_delay`` seconds (250 milliseconds by default),
    a second attempt is started using the next address in the list, and so on. On IPv6
    enabled systems, an IPv6 address (if available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before
        connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is
        created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
        before closing the stream and requires that the server does this as well.
        Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to
        the value of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection
        attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: SocketStream | None = None

    async def try_connect(remote_host: str, event: Event) -> None:
        # One concurrent connection attempt; the first attempt to succeed wins and
        # cancels the rest. Losing attempts close their streams; failures are
        # collected into ``oserrors`` for the aggregate error below.
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
        except OSError as exc:
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                connected_stream = stream
                tg.cancel_scope.cancel()
            else:
                await stream.aclose()
        finally:
            # Always wake the launcher loop so it can start the next attempt
            event.set()

    asynclib = get_async_backend()
    local_address: IPSockAddrType | None = None
    family = socket.AF_UNSPEC
    if local_host:
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        addr_obj = None

    if addr_obj is not None:
        # Literal IP address given: skip name resolution entirely
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]
    else:
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(
            target_host, remote_port, family=family, type=socket.SOCK_STREAM
        )

        # Organize the list so that the first address is an IPv6 address (if available)
        # and the second one is an IPv4 addresses. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs = []
        for af, *rest, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))

    oserrors: list[OSError] = []
    try:
        async with create_task_group() as tg:
            # Stagger the attempts: wait up to happy_eyeballs_delay for each one
            # before launching the next
            for i, (af, addr) in enumerate(target_addrs):
                event = Event()
                tg.start_soon(try_connect, addr, event)
                with move_on_after(happy_eyeballs_delay):
                    await event.wait()

        if connected_stream is None:
            cause = (
                oserrors[0]
                if len(oserrors) == 1
                else ExceptionGroup("multiple connection attempts failed", oserrors)
            )
            raise OSError("All connection attempts failed") from cause
    finally:
        oserrors.clear()

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(
                connected_stream,
                server_side=False,
                hostname=tls_hostname or str(remote_host),
                ssl_context=ssl_context,
                standard_compatible=tls_standard_compatible,
            )
        except BaseException:
            # Don't leak the TCP connection if the handshake fails
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream
|
||||
|
||||
|
||||
async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream:
    """
    Open a stream connection to the UNIX socket at the given path.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object

    """
    return await get_async_backend().connect_unix(os.fspath(path))
|
||||
|
||||
|
||||
async def create_tcp_listener(
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
    backlog: int = 65536,
    reuse_port: bool = False,
) -> MultiListener[SocketStream]:
    """
    Create a TCP socket listener.

    One listening socket is created per address family returned by name resolution,
    and all of them are wrapped in a single :class:`MultiListener`.

    :param local_port: port number to listen on
    :param local_host: IP address of the interface to listen on. If omitted, listen on
        all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
        family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
    :param family: address family (used if ``local_host`` was omitted)
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a list of listener objects

    """
    asynclib = get_async_backend()
    backlog = min(backlog, 65536)  # cap at the largest value the OS accepts
    local_host = str(local_host) if local_host is not None else None
    gai_res = await getaddrinfo(
        local_host,
        local_port,
        family=family,
        type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
        flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
    )
    listeners: list[SocketListener] = []
    try:
        # The set() is here to work around a glibc bug:
        # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
        sockaddr: tuple[str, int] | tuple[str, int, int, int]
        for fam, kind, *_, sockaddr in sorted(set(gai_res)):
            # Workaround for an uvloop bug where we don't get the correct scope ID for
            # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
            # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
            if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
                continue

            raw_socket = socket.socket(fam)
            raw_socket.setblocking(False)

            # For Windows, enable exclusive address use. For others, enable address
            # reuse.
            if sys.platform == "win32":
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            if reuse_port:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

            # If only IPv6 was requested, disable dual stack operation
            if fam == socket.AF_INET6:
                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

            # Workaround for #554: split a "%scope" suffix off a link-local address
            # and pass the scope ID as the 4th element of the sockaddr instead
            if "%" in sockaddr[0]:
                addr, scope_id = sockaddr[0].split("%", 1)
                sockaddr = (addr, sockaddr[1], 0, int(scope_id))

            raw_socket.bind(sockaddr)
            raw_socket.listen(backlog)
            listener = asynclib.create_tcp_listener(raw_socket)
            listeners.append(listener)
    except BaseException:
        # Close any listeners already created before re-raising
        for listener in listeners:
            await listener.aclose()

        raise

    return MultiListener(listeners)
|
||||
|
||||
|
||||
async def create_unix_listener(
    path: str | bytes | PathLike[Any],
    *,
    mode: int | None = None,
    backlog: int = 65536,
) -> SocketListener:
    """
    Create a listener for incoming UNIX socket connections.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param backlog: maximum number of queued incoming connections (capped at 2**16,
        or 65536)
    :return: a listener object

    .. versionchanged:: 3.0
        If a socket already exists on the file system in the given path, it will be
        removed first.

    """
    raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
    try:
        raw_socket.listen(min(backlog, 65536))
        return get_async_backend().create_unix_listener(raw_socket)
    except BaseException:
        # Don't leak the socket if listener creation fails
        raw_socket.close()
        raise
|
||||
|
||||
|
||||
async def create_udp_socket(
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> UDPSocket:
    """
    Create a UDP socket.

    If ``port`` has been given, the socket will be bound to this port on the local
    machine, making this socket suitable for providing UDP based services.

    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a UDP socket

    """
    # Without a local host there is nothing to derive the address family from
    if family is AddressFamily.AF_UNSPEC and not local_host:
        raise ValueError('Either "family" or "local_host" must be given')

    if local_host:
        # Resolve the local interface; the first result decides family and address
        resolved = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, resolved[0][0])
        local_address = resolved[0][-1]
    else:
        # No interface given: bind to the wildcard address of the chosen family
        wildcard = "::" if family is AddressFamily.AF_INET6 else "0.0.0.0"
        local_address = (wildcard, 0)

    sock = await get_async_backend().create_udp_socket(
        family, local_address, None, reuse_port
    )
    return cast(UDPSocket, sock)
|
||||
|
||||
|
||||
async def create_connected_udp_socket(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> ConnectedUDPSocket:
    """
    Create a connected UDP socket.

    Connected UDP sockets can only communicate with the specified remote host/port, and
    any packets sent from other sources are dropped.

    :param remote_host: remote host to set as the default target
    :param remote_port: port on the remote host to set as the default target
    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` or ``remote_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a connected UDP socket

    """
    local_address = None
    if local_host:
        # Resolve the local interface first; its family constrains the remote lookup
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]

    # Resolve the remote endpoint; the first result wins
    gai_res = await getaddrinfo(
        str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
    )
    family = cast(AnyIPAddressFamily, gai_res[0][0])
    remote_address = gai_res[0][-1]

    sock = await get_async_backend().create_udp_socket(
        family, local_address, remote_address, reuse_port
    )
    return cast(ConnectedUDPSocket, sock)
|
||||
|
||||
|
||||
async def create_unix_datagram_socket(
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> UNIXDatagramSocket:
    """
    Create a (unconnected) UNIX datagram socket.

    Not available on Windows.

    If ``local_path`` has been given, the socket will be bound to this path, making this
    socket suitable for receiving datagrams from other processes. Other processes can
    send datagrams to this socket only if ``local_path`` is set.

    If a socket already exists on the file system in the ``local_path``, it will be
    removed first.

    :param local_path: the path on which to bind to
    :param local_mode: permissions to set on the local socket
    :return: a UNIX datagram socket

    """
    raw_socket = await setup_unix_local_socket(
        local_path, local_mode, socket.SOCK_DGRAM
    )
    backend = get_async_backend()
    return await backend.create_unix_datagram_socket(raw_socket, None)
|
||||
|
||||
|
||||
async def create_connected_unix_datagram_socket(
    remote_path: str | bytes | PathLike[Any],
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> ConnectedUNIXDatagramSocket:
    """
    Create a connected UNIX datagram socket.

    Connected datagram sockets can only communicate with the specified remote path.

    If ``local_path`` has been given, the socket will be bound to this path, making
    this socket suitable for receiving datagrams from other processes. Other processes
    can send datagrams to this socket only if ``local_path`` is set.

    If a socket already exists on the file system in the ``local_path``, it will be
    removed first.

    :param remote_path: the path to set as the default target
    :param local_path: the path on which to bind to
    :param local_mode: permissions to set on the local socket
    :return: a connected UNIX datagram socket

    """
    target = os.fspath(remote_path)
    raw_socket = await setup_unix_local_socket(
        local_path, local_mode, socket.SOCK_DGRAM
    )
    backend = get_async_backend()
    return await backend.create_unix_datagram_socket(raw_socket, target)
|
||||
|
||||
|
||||
async def getaddrinfo(
    host: bytes | str | None,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
    """
    Look up a numeric IP address given a host name.

    Internationalized domain names are translated according to the (non-transitional)
    IDNA 2008 standard.

    .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
        (host, port), unlike what :func:`socket.getaddrinfo` does.

    :param host: host name
    :param port: port number
    :param family: socket family (``AF_INET``, ...)
    :param type: socket type (``SOCK_STREAM``, ...)
    :param proto: protocol number
    :param flags: flags to pass to upstream ``getaddrinfo()``
    :return: list of tuples containing (family, type, proto, canonname, sockaddr)

    .. seealso:: :func:`socket.getaddrinfo`

    """
    # Handle unicode hostnames: plain ASCII names are encoded directly; anything else
    # is run through the IDNA 2008 codec (the ``idna`` package, imported lazily)
    if isinstance(host, str):
        try:
            encoded_host: bytes | None = host.encode("ascii")
        except UnicodeEncodeError:
            import idna

            encoded_host = idna.encode(host, uts46=True)
    else:
        encoded_host = host

    gai_res = await get_async_backend().getaddrinfo(
        encoded_host, port, family=family, type=type, proto=proto, flags=flags
    )
    return [
        (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
        for family, type, proto, canonname, sockaddr in gai_res
        # filter out IPv6 results when IPv6 is disabled
        if not isinstance(sockaddr[0], int)
    ]
|
||||
|
||||
|
||||
def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
    """
    Look up the host name of an IP address.

    :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
    :param flags: flags to pass to upstream ``getnameinfo()``
    :return: a tuple of (host name, service name)

    .. seealso:: :func:`socket.getnameinfo`

    """
    backend = get_async_backend()
    return backend.getnameinfo(sockaddr, flags)
|
||||
|
||||
|
||||
@deprecated("This function is deprecated; use `wait_readable` instead")
def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
    """
    .. deprecated:: 4.7.0
        Use :func:`wait_readable` instead.

    Wait until the given socket has data to be read.

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become readable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become readable

    """
    fd = sock.fileno()
    return get_async_backend().wait_readable(fd)
|
||||
|
||||
|
||||
@deprecated("This function is deprecated; use `wait_writable` instead")
def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
    """
    .. deprecated:: 4.7.0
        Use :func:`wait_writable` instead.

    Wait until the given socket can be written to.

    This does **NOT** work on Windows when using the asyncio backend with a proactor
    event loop (default on py3.8+).

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become writable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become writable

    """
    fd = sock.fileno()
    return get_async_backend().wait_writable(fd)
|
||||
|
||||
|
||||
def wait_readable(obj: FileDescriptorLike) -> Awaitable[None]:
    """
    Wait until the given object has data to be read.

    On Unix systems, ``obj`` must either be an integer file descriptor, or else an
    object with a ``.fileno()`` method which returns an integer file descriptor. Any
    kind of file descriptor can be passed, though the exact semantics will depend on
    your kernel. For example, this probably won't do anything useful for on-disk files.

    On Windows systems, ``obj`` must either be an integer ``SOCKET`` handle, or else an
    object with a ``.fileno()`` method which returns an integer ``SOCKET`` handle. File
    descriptors aren't supported, and neither are handles that refer to anything besides
    a ``SOCKET``.

    On backends where this functionality is not natively provided (asyncio
    ``ProactorEventLoop`` on Windows), it is provided using a separate selector thread
    which is set to shut down when the interpreter shuts down.

    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
        level constructs like socket streams!

    :param obj: an object with a ``.fileno()`` method or an integer handle
    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
        object to become readable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
        to become readable

    """
    backend = get_async_backend()
    return backend.wait_readable(obj)
|
||||
|
||||
|
||||
def wait_writable(obj: FileDescriptorLike) -> Awaitable[None]:
    """
    Wait until the given object can be written to.

    :param obj: an object with a ``.fileno()`` method or an integer handle
    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
        object to become writable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
        to become writable

    .. seealso:: See the documentation of :func:`wait_readable` for the definition of
        ``obj`` and notes on backend compatibility.

    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
        level constructs like socket streams!

    """
    backend = get_async_backend()
    return backend.wait_writable(obj)
|
||||
|
||||
|
||||
#
|
||||
# Private API
|
||||
#
|
||||
|
||||
|
||||
def convert_ipv6_sockaddr(
|
||||
sockaddr: tuple[str, int, int, int] | tuple[str, int],
|
||||
) -> tuple[str, int]:
|
||||
"""
|
||||
Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.
|
||||
|
||||
If the scope ID is nonzero, it is added to the address, separated with ``%``.
|
||||
Otherwise the flow id and scope id are simply cut off from the tuple.
|
||||
Any other kinds of socket addresses are returned as-is.
|
||||
|
||||
:param sockaddr: the result of :meth:`~socket.socket.getsockname`
|
||||
:return: the converted socket address
|
||||
|
||||
"""
|
||||
# This is more complicated than it should be because of MyPy
|
||||
if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
|
||||
host, port, flowinfo, scope_id = sockaddr
|
||||
if scope_id:
|
||||
# PyPy (as of v7.3.11) leaves the interface name in the result, so
|
||||
# we discard it and only get the scope ID from the end
|
||||
# (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
|
||||
host = host.split("%")[0]
|
||||
|
||||
# Add scope_id to the address
|
||||
return f"{host}%{scope_id}", port
|
||||
else:
|
||||
return host, port
|
||||
else:
|
||||
return sockaddr
|
||||
|
||||
|
||||
async def setup_unix_local_socket(
    path: None | str | bytes | PathLike[Any],
    mode: int | None,
    socktype: int,
) -> socket.socket:
    """
    Create a UNIX local socket object, deleting the socket at the given path if it
    exists.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM

    """
    path_str: str | None
    if path is not None:
        path_str = os.fsdecode(path)

        # Linux abstract namespace sockets aren't backed by a concrete file so skip stat call
        if not path_str.startswith("\0"):
            # Copied from pathlib...
            try:
                stat_result = os.stat(path)
            except OSError as e:
                # Tolerate only the errnos that mean "nothing usable at this path";
                # anything else is a real failure
                if e.errno not in (
                    errno.ENOENT,
                    errno.ENOTDIR,
                    errno.EBADF,
                    errno.ELOOP,
                ):
                    raise
            else:
                # Only remove the existing file if it is actually a socket, to avoid
                # deleting unrelated files at the same path
                if stat.S_ISSOCK(stat_result.st_mode):
                    os.unlink(path)
    else:
        path_str = None

    raw_socket = socket.socket(socket.AF_UNIX, socktype)
    raw_socket.setblocking(False)

    if path_str is not None:
        try:
            # bind()/chmod() are run in a worker thread; presumably because they can
            # block on some filesystems -- TODO confirm
            await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True)
            if mode is not None:
                await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True)
        except BaseException:
            # Don't leak the socket if binding or chmod fails
            raw_socket.close()
            raise

    return raw_socket
|
||||
@@ -0,0 +1,52 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from typing import TypeVar
|
||||
from warnings import warn
|
||||
|
||||
from ..streams.memory import (
|
||||
MemoryObjectReceiveStream,
|
||||
MemoryObjectSendStream,
|
||||
MemoryObjectStreamState,
|
||||
)
|
||||
|
||||
T_Item = TypeVar("T_Item")
|
||||
|
||||
|
||||
class create_memory_object_stream(
    tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
):
    """
    Create a memory object stream.

    The stream's item type can be annotated like
    :func:`create_memory_object_stream[T_Item]`.

    :param max_buffer_size: number of items held in the buffer until ``send()`` starts
        blocking
    :param item_type: old way of marking the streams with the right generic type for
        static typing (does nothing on AnyIO 4)

    .. deprecated:: 4.0
        Use ``create_memory_object_stream[YourItemType](...)`` instead.
    :return: a tuple of (send stream, receive stream)

    """

    def __new__(  # type: ignore[misc]
        cls, max_buffer_size: float = 0, item_type: object = None
    ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
        # Validate the buffer size: only non-negative integers or math.inf are allowed
        if not isinstance(max_buffer_size, int) and max_buffer_size != math.inf:
            raise ValueError("max_buffer_size must be either an integer or math.inf")

        if max_buffer_size < 0:
            raise ValueError("max_buffer_size cannot be negative")

        if item_type is not None:
            warn(
                "The item_type argument has been deprecated in AnyIO 4.0. "
                "Use create_memory_object_stream[YourItemType](...) instead.",
                DeprecationWarning,
                stacklevel=2,
            )

        # Both halves share one state object, which carries the buffer
        shared_state = MemoryObjectStreamState[T_Item](max_buffer_size)
        send = MemoryObjectSendStream(shared_state)
        receive = MemoryObjectReceiveStream(shared_state)
        return (send, receive)
|
||||
@@ -0,0 +1,202 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from collections.abc import AsyncIterable, Iterable, Mapping, Sequence
|
||||
from io import BytesIO
|
||||
from os import PathLike
|
||||
from subprocess import PIPE, CalledProcessError, CompletedProcess
|
||||
from typing import IO, Any, Union, cast
|
||||
|
||||
from ..abc import Process
|
||||
from ._eventloop import get_async_backend
|
||||
from ._tasks import create_task_group
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from typing import TypeAlias
|
||||
else:
|
||||
from typing_extensions import TypeAlias
|
||||
|
||||
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
|
||||
|
||||
|
||||
async def run_process(
    command: StrOrBytesPath | Sequence[StrOrBytesPath],
    *,
    input: bytes | None = None,
    stdin: int | IO[Any] | None = None,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    check: bool = True,
    cwd: StrOrBytesPath | None = None,
    env: Mapping[str, str] | None = None,
    startupinfo: Any = None,
    creationflags: int = 0,
    start_new_session: bool = False,
    pass_fds: Sequence[int] = (),
    user: str | int | None = None,
    group: str | int | None = None,
    extra_groups: Iterable[str | int] | None = None,
    umask: int = -1,
) -> CompletedProcess[bytes]:
    """
    Run an external command in a subprocess and wait until it completes.

    .. seealso:: :func:`subprocess.run`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param input: bytes passed to the standard input of the subprocess
    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or `None`; ``input`` overrides this
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or `None`
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or `None`
    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
        process terminates with a return code other than 0
    :param cwd: If not ``None``, change the working directory to this before running the
        command
    :param env: if not ``None``, this mapping replaces the inherited environment
        variables from the parent process
    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
        to specify process startup parameters (Windows only)
    :param creationflags: flags that can be used to control the creation of the
        subprocess (see :class:`subprocess.Popen` for the specifics)
    :param start_new_session: if ``True`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess. (POSIX only)
    :param pass_fds: sequence of file descriptors to keep open between the parent and
        child processes. (POSIX only)
    :param user: effective user to run the process as (Python >= 3.9, POSIX only)
    :param group: effective group to run the process as (Python >= 3.9, POSIX only)
    :param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9,
        POSIX only)
    :param umask: if not negative, this umask is applied in the child process before
        running the given command (Python >= 3.9, POSIX only)
    :return: an object representing the completed process
    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
        exits with a nonzero return code

    """

    async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
        # Collect everything the process writes on one output stream into
        # stream_contents[index] (0 = stdout, 1 = stderr)
        buffer = BytesIO()
        async for chunk in stream:
            buffer.write(chunk)

        stream_contents[index] = buffer.getvalue()

    if stdin is not None and input is not None:
        raise ValueError("only one of stdin and input is allowed")

    # A non-empty ``input`` forces a stdin pipe; otherwise the caller's ``stdin``
    # value is passed through unchanged
    async with await open_process(
        command,
        stdin=PIPE if input else stdin,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        startupinfo=startupinfo,
        creationflags=creationflags,
        start_new_session=start_new_session,
        pass_fds=pass_fds,
        user=user,
        group=group,
        extra_groups=extra_groups,
        umask=umask,
    ) as process:
        stream_contents: list[bytes | None] = [None, None]
        async with create_task_group() as tg:
            # Drain stdout/stderr concurrently to avoid a deadlock when either
            # pipe's buffer fills up
            if process.stdout:
                tg.start_soon(drain_stream, process.stdout, 0)

            if process.stderr:
                tg.start_soon(drain_stream, process.stderr, 1)

            if process.stdin and input:
                await process.stdin.send(input)
                await process.stdin.aclose()

            await process.wait()

    output, errors = stream_contents
    if check and process.returncode != 0:
        raise CalledProcessError(cast(int, process.returncode), command, output, errors)

    return CompletedProcess(command, cast(int, process.returncode), output, errors)
|
||||
|
||||
|
||||
async def open_process(
    command: StrOrBytesPath | Sequence[StrOrBytesPath],
    *,
    stdin: int | IO[Any] | None = PIPE,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    cwd: StrOrBytesPath | None = None,
    env: Mapping[str, str] | None = None,
    startupinfo: Any = None,
    creationflags: int = 0,
    start_new_session: bool = False,
    pass_fds: Sequence[int] = (),
    user: str | int | None = None,
    group: str | int | None = None,
    extra_groups: Iterable[str | int] | None = None,
    umask: int = -1,
) -> Process:
    """
    Start an external command in a subprocess.

    .. seealso:: :class:`subprocess.Popen`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
        file-like object, or ``None``
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
    :param cwd: If not ``None``, the working directory is changed before executing
    :param env: If env is not ``None``, it must be a mapping that defines the
        environment variables for the new process
    :param creationflags: flags that can be used to control the creation of the
        subprocess (see :class:`subprocess.Popen` for the specifics)
    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
        to specify process startup parameters (Windows only)
    :param start_new_session: if ``true`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess. (POSIX only)
    :param pass_fds: sequence of file descriptors to keep open between the parent and
        child processes. (POSIX only)
    :param user: effective user to run the process as (POSIX only)
    :param group: effective group to run the process as (POSIX only)
    :param extra_groups: supplementary groups to set in the subprocess (POSIX only)
    :param umask: if not negative, this umask is applied in the child process before
        running the given command (POSIX only)
    :return: an asynchronous process object

    """
    # Only forward the POSIX-only options that were explicitly given, so that
    # backends running on platforms/Pythons without them are not affected.
    kwargs: dict[str, Any] = {}
    if user is not None:
        kwargs["user"] = user

    if group is not None:
        kwargs["group"] = group

    if extra_groups is not None:
        # BUG FIX: this previously assigned ``group`` instead of
        # ``extra_groups``, silently passing the wrong value to the backend.
        kwargs["extra_groups"] = extra_groups

    if umask >= 0:
        kwargs["umask"] = umask

    return await get_async_backend().open_process(
        command,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        startupinfo=startupinfo,
        creationflags=creationflags,
        start_new_session=start_new_session,
        pass_fds=pass_fds,
        **kwargs,
    )
|
||||
@@ -0,0 +1,732 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from collections import deque
|
||||
from dataclasses import dataclass
|
||||
from types import TracebackType
|
||||
|
||||
from sniffio import AsyncLibraryNotFoundError
|
||||
|
||||
from ..lowlevel import checkpoint
|
||||
from ._eventloop import get_async_backend
|
||||
from ._exceptions import BusyResourceError
|
||||
from ._tasks import CancelScope
|
||||
from ._testing import TaskInfo, get_current_task
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class EventStatistics:
    """
    Immutable snapshot of an :class:`Event`'s state, as returned by
    :meth:`Event.statistics`.

    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
    """

    # Number of tasks currently blocked in Event.wait()
    tasks_waiting: int
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class CapacityLimiterStatistics:
    """
    Immutable snapshot of a :class:`CapacityLimiter`'s state, as returned by
    :meth:`CapacityLimiter.statistics`.

    :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
    :ivar float total_tokens: total number of available tokens
    :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
        this limiter
    :ivar int tasks_waiting: number of tasks waiting on
        :meth:`~.CapacityLimiter.acquire` or
        :meth:`~.CapacityLimiter.acquire_on_behalf_of`
    """

    # Tokens currently checked out of the limiter
    borrowed_tokens: int
    # May be math.inf, hence float rather than int
    total_tokens: float
    # Arbitrary borrower objects (usually TaskInfo instances)
    borrowers: tuple[object, ...]
    # Tasks currently blocked waiting for a token
    tasks_waiting: int
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class LockStatistics:
    """
    Immutable snapshot of a :class:`Lock`'s state, as returned by
    :meth:`Lock.statistics`.

    :ivar bool locked: flag indicating if this lock is locked or not
    :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the
        lock is not held by any task)
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
    """

    # True while some task holds the lock
    locked: bool
    # The holding task, or None when unlocked
    owner: TaskInfo | None
    # Tasks currently blocked in Lock.acquire()
    tasks_waiting: int
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ConditionStatistics:
    """
    Immutable snapshot of a :class:`Condition`'s state, as returned by
    :meth:`Condition.statistics`.

    :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
    :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying
        :class:`~.Lock`
    """

    # Tasks currently blocked in Condition.wait()
    tasks_waiting: int
    # State of the condition's underlying lock
    lock_statistics: LockStatistics
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class SemaphoreStatistics:
    """
    Immutable snapshot of a :class:`Semaphore`'s state, as returned by
    :meth:`Semaphore.statistics`.

    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`

    """

    # Tasks currently blocked in Semaphore.acquire()
    tasks_waiting: int
|
||||
|
||||
|
||||
class Event:
    """
    An asynchronous event object (set-once flag that wakes all waiters).

    Instantiating this class returns a backend-specific implementation; if no
    async library can be detected (e.g. when created outside an event loop),
    an :class:`EventAdapter` is returned which defers backend selection.
    """

    def __new__(cls) -> Event:
        try:
            # Delegate to the currently running async backend (asyncio/trio)
            return get_async_backend().create_event()
        except AsyncLibraryNotFoundError:
            # No event loop running yet; defer backend selection to first use
            return EventAdapter()

    def set(self) -> None:
        """Set the flag, notifying all listeners."""
        raise NotImplementedError

    def is_set(self) -> bool:
        """Return ``True`` if the flag is set, ``False`` if not."""
        raise NotImplementedError

    async def wait(self) -> None:
        """
        Wait until the flag has been set.

        If the flag has already been set when this method is called, it returns
        immediately.

        """
        raise NotImplementedError

    def statistics(self) -> EventStatistics:
        """Return statistics about the current state of this event."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class EventAdapter(Event):
    """
    Lazy :class:`Event` stand-in used when no async backend was detected at
    construction time.  The real backend event is created on first use; until
    then, the set/unset state is tracked in ``_is_set``.
    """

    _internal_event: Event | None = None
    _is_set: bool = False

    def __new__(cls) -> EventAdapter:
        # Bypass Event.__new__ so we don't recurse into backend detection
        return object.__new__(cls)

    @property
    def _event(self) -> Event:
        # Instantiate the backend event lazily, carrying over any state that
        # was recorded before a backend became available.
        if self._internal_event is None:
            backend_event = get_async_backend().create_event()
            self._internal_event = backend_event
            if self._is_set:
                backend_event.set()

        return self._internal_event

    def set(self) -> None:
        if self._internal_event is not None:
            self._event.set()
        else:
            # No backend event yet; just remember the flag
            self._is_set = True

    def is_set(self) -> bool:
        if self._internal_event is not None:
            return self._internal_event.is_set()

        return self._is_set

    async def wait(self) -> None:
        # Accessing self._event forces creation of the backend event
        await self._event.wait()

    def statistics(self) -> EventStatistics:
        if self._internal_event is not None:
            return self._internal_event.statistics()

        # Nothing can be waiting before the backend event exists
        return EventStatistics(tasks_waiting=0)
|
||||
|
||||
|
||||
class Lock:
    """
    An asynchronous mutex lock.

    Instantiating this class returns a backend-specific implementation; if no
    async library can be detected, a :class:`LockAdapter` is returned which
    defers backend selection until first use.
    """

    def __new__(cls, *, fast_acquire: bool = False) -> Lock:
        try:
            # Delegate to the currently running async backend
            return get_async_backend().create_lock(fast_acquire=fast_acquire)
        except AsyncLibraryNotFoundError:
            # No event loop running yet; defer backend selection to first use
            return LockAdapter(fast_acquire=fast_acquire)

    async def __aenter__(self) -> None:
        # ``async with lock:`` acquires on entry...
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        # ...and releases on exit, even if the body raised
        self.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        raise NotImplementedError

    def release(self) -> None:
        """Release the lock."""
        raise NotImplementedError

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        raise NotImplementedError

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class LockAdapter(Lock):
    """
    Lazy :class:`Lock` stand-in used when no async backend was detected at
    construction time.  The real backend lock is created on first use.
    """

    _internal_lock: Lock | None = None

    def __new__(cls, *, fast_acquire: bool = False) -> LockAdapter:
        # Bypass Lock.__new__ so we don't recurse into backend detection
        return object.__new__(cls)

    def __init__(self, *, fast_acquire: bool = False):
        # Remembered so the backend lock can be created with the same option
        self._fast_acquire = fast_acquire

    @property
    def _lock(self) -> Lock:
        # Instantiate the backend lock lazily
        if self._internal_lock is None:
            backend = get_async_backend()
            self._internal_lock = backend.create_lock(fast_acquire=self._fast_acquire)

        return self._internal_lock

    async def __aenter__(self) -> None:
        await self._lock.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        # If the backend lock was never created, there is nothing to release
        backend_lock = self._internal_lock
        if backend_lock is not None:
            backend_lock.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        await self._lock.acquire()

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._lock.acquire_nowait()

    def release(self) -> None:
        """Release the lock."""
        self._lock.release()

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        return self._lock.locked()

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0

        """
        backend_lock = self._internal_lock
        if backend_lock is not None:
            return backend_lock.statistics()

        # No backend lock yet: it is trivially unlocked with no waiters
        return LockStatistics(False, None, 0)
|
||||
|
||||
|
||||
class Condition:
    """
    An asynchronous condition variable layered on top of a :class:`Lock`.

    :param lock: the lock to use (a new :class:`Lock` is created if omitted)
    """

    # Task currently holding the underlying lock via this condition, if any
    _owner_task: TaskInfo | None = None

    def __init__(self, lock: Lock | None = None):
        self._lock = lock or Lock()
        # One Event per task blocked in wait()
        self._waiters: deque[Event] = deque()

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    def _check_acquired(self) -> None:
        # notify()/notify_all() may only be called by the lock holder
        if self._owner_task != get_current_task():
            raise RuntimeError("The current task is not holding the underlying lock")

    async def acquire(self) -> None:
        """Acquire the underlying lock."""
        await self._lock.acquire()
        self._owner_task = get_current_task()

    def acquire_nowait(self) -> None:
        """
        Acquire the underlying lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._lock.acquire_nowait()
        self._owner_task = get_current_task()

    def release(self) -> None:
        """Release the underlying lock."""
        self._lock.release()

    def locked(self) -> bool:
        """Return True if the lock is set."""
        return self._lock.locked()

    def notify(self, n: int = 1) -> None:
        """Notify exactly n listeners."""
        self._check_acquired()
        # Wake up to n waiters in FIFO order; stop early if fewer are waiting
        for _ in range(n):
            try:
                event = self._waiters.popleft()
            except IndexError:
                break

            event.set()

    def notify_all(self) -> None:
        """Notify all the listeners."""
        self._check_acquired()
        for event in self._waiters:
            event.set()

        self._waiters.clear()

    async def wait(self) -> None:
        """Wait for a notification."""
        # Checkpoint first so cancellation is honored before releasing the lock
        await checkpoint()
        event = Event()
        self._waiters.append(event)
        self.release()
        try:
            await event.wait()
        except BaseException:
            # Cancelled (or otherwise interrupted) before being notified:
            # withdraw from the waiter queue unless a notify already consumed us
            if not event.is_set():
                self._waiters.remove(event)

            raise
        finally:
            # Re-acquire the lock unconditionally; shielded so that an outer
            # cancellation cannot leave the condition without its lock held
            with CancelScope(shield=True):
                await self.acquire()

    def statistics(self) -> ConditionStatistics:
        """
        Return statistics about the current state of this condition.

        .. versionadded:: 3.0
        """
        return ConditionStatistics(len(self._waiters), self._lock.statistics())
|
||||
|
||||
|
||||
class Semaphore:
    """
    An asynchronous counting semaphore.

    Instantiating this class returns a backend-specific implementation; if no
    async library can be detected, a :class:`SemaphoreAdapter` is returned
    which defers backend selection until first use.

    :param initial_value: the semaphore's initial value (must be >= 0)
    :param max_value: if given, an upper bound the value may never exceed
    :param fast_acquire: skip the scheduling checkpoint when the semaphore can
        be acquired without blocking
    :raises TypeError: if ``initial_value`` is not an int, or ``max_value`` is
        neither an int nor ``None``
    :raises ValueError: if ``initial_value`` is negative, or ``max_value`` is
        smaller than ``initial_value``
    """

    def __new__(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        try:
            return get_async_backend().create_semaphore(
                initial_value, max_value=max_value, fast_acquire=fast_acquire
            )
        except AsyncLibraryNotFoundError:
            # BUG FIX: ``fast_acquire`` was previously dropped here, unlike in
            # Lock.__new__ which forwards it to its adapter.
            return SemaphoreAdapter(
                initial_value, max_value=max_value, fast_acquire=fast_acquire
            )

    def __init__(
        self,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ):
        # Validate eagerly so misuse fails at construction, not on first acquire
        if not isinstance(initial_value, int):
            raise TypeError("initial_value must be an integer")
        if initial_value < 0:
            raise ValueError("initial_value must be >= 0")
        if max_value is not None:
            if not isinstance(max_value, int):
                raise TypeError("max_value must be an integer or None")
            if max_value < initial_value:
                raise ValueError(
                    "max_value must be equal to or higher than initial_value"
                )

        self._fast_acquire = fast_acquire

    async def __aenter__(self) -> Semaphore:
        await self.acquire()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Decrement the semaphore value, blocking if necessary."""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire the underlying lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        raise NotImplementedError

    def release(self) -> None:
        """Increment the semaphore value."""
        raise NotImplementedError

    @property
    def value(self) -> int:
        """The current value of the semaphore."""
        raise NotImplementedError

    @property
    def max_value(self) -> int | None:
        """The maximum value of the semaphore."""
        raise NotImplementedError

    def statistics(self) -> SemaphoreStatistics:
        """
        Return statistics about the current state of this semaphore.

        .. versionadded:: 3.0
        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class SemaphoreAdapter(Semaphore):
    """
    Lazy :class:`Semaphore` stand-in used when no async backend was detected at
    construction time.  The real backend semaphore is created on first use.
    """

    _internal_semaphore: Semaphore | None = None

    def __new__(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> SemaphoreAdapter:
        # Bypass Semaphore.__new__ so we don't recurse into backend detection
        return object.__new__(cls)

    def __init__(
        self,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> None:
        # Semaphore.__init__ validates the arguments and stores _fast_acquire
        super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
        self._initial_value = initial_value
        self._max_value = max_value

    @property
    def _semaphore(self) -> Semaphore:
        if self._internal_semaphore is None:
            # BUG FIX: previously ``fast_acquire`` was not forwarded to the
            # backend semaphore (inconsistent with LockAdapter._lock), so the
            # caller's fast_acquire request was silently lost.
            self._internal_semaphore = get_async_backend().create_semaphore(
                self._initial_value,
                max_value=self._max_value,
                fast_acquire=self._fast_acquire,
            )

        return self._internal_semaphore

    async def acquire(self) -> None:
        """Decrement the semaphore value, blocking if necessary."""
        await self._semaphore.acquire()

    def acquire_nowait(self) -> None:
        """
        Decrement the semaphore value, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._semaphore.acquire_nowait()

    def release(self) -> None:
        """Increment the semaphore value."""
        self._semaphore.release()

    @property
    def value(self) -> int:
        """The current value of the semaphore."""
        if self._internal_semaphore is None:
            # Nothing has been acquired before the backend semaphore exists
            return self._initial_value

        return self._semaphore.value

    @property
    def max_value(self) -> int | None:
        """The maximum value of the semaphore."""
        return self._max_value

    def statistics(self) -> SemaphoreStatistics:
        """Return statistics about the current state of this semaphore."""
        if self._internal_semaphore is None:
            return SemaphoreStatistics(tasks_waiting=0)

        return self._semaphore.statistics()
|
||||
|
||||
|
||||
class CapacityLimiter:
    """
    A capacity limiter: limits how many borrowers may hold a token at once.

    Instantiating this class returns a backend-specific implementation; if no
    async library can be detected, a :class:`CapacityLimiterAdapter` is
    returned which defers backend selection until first use.
    """

    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        try:
            # Delegate to the currently running async backend
            return get_async_backend().create_capacity_limiter(total_tokens)
        except AsyncLibraryNotFoundError:
            # No event loop running yet; defer backend selection to first use
            return CapacityLimiterAdapter(total_tokens)

    async def __aenter__(self) -> None:
        raise NotImplementedError

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError

    @property
    def total_tokens(self) -> float:
        """
        The total number of tokens available for borrowing.

        This is a read-write property. If the total number of tokens is increased, the
        proportionate number of tasks waiting on this limiter will be granted their
        tokens.

        .. versionchanged:: 3.0
            The property is now writable.

        """
        raise NotImplementedError

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        raise NotImplementedError

    @property
    def borrowed_tokens(self) -> int:
        """The number of tokens that have currently been borrowed."""
        raise NotImplementedError

    @property
    def available_tokens(self) -> float:
        """The number of tokens currently available to be borrowed"""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire a token for the current task without waiting for one to become
        available.

        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        """
        Acquire a token without waiting for one to become available.

        :param borrower: the entity borrowing a token
        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    async def acquire(self) -> None:
        """
        Acquire a token for the current task, waiting if necessary for one to become
        available.

        """
        raise NotImplementedError

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """
        Acquire a token, waiting if necessary for one to become available.

        :param borrower: the entity borrowing a token

        """
        raise NotImplementedError

    def release(self) -> None:
        """
        Release the token held by the current task.

        :raises RuntimeError: if the current task has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def release_on_behalf_of(self, borrower: object) -> None:
        """
        Release the token held by the given borrower.

        :raises RuntimeError: if the borrower has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def statistics(self) -> CapacityLimiterStatistics:
        """
        Return statistics about the current state of this limiter.

        .. versionadded:: 3.0

        """
        raise NotImplementedError
|
||||
|
||||
|
||||
class CapacityLimiterAdapter(CapacityLimiter):
    """
    Lazy :class:`CapacityLimiter` stand-in used when no async backend was
    detected at construction time.  The real backend limiter is created on
    first use.
    """

    _internal_limiter: CapacityLimiter | None = None

    def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:
        # Bypass CapacityLimiter.__new__ so we don't recurse into backend detection
        return object.__new__(cls)

    def __init__(self, total_tokens: float) -> None:
        # Goes through the property setter below, which validates the value
        self.total_tokens = total_tokens

    @property
    def _limiter(self) -> CapacityLimiter:
        # Instantiate the backend limiter lazily
        if self._internal_limiter is None:
            backend = get_async_backend()
            self._internal_limiter = backend.create_capacity_limiter(
                self._total_tokens
            )

        return self._internal_limiter

    async def __aenter__(self) -> None:
        await self._limiter.__aenter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)

    @property
    def total_tokens(self) -> float:
        limiter = self._internal_limiter
        if limiter is not None:
            return limiter.total_tokens

        return self._total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        # Accept only ints or math.inf (checked by identity, as inf is a float)
        if not isinstance(value, int) and value is not math.inf:
            raise TypeError("total_tokens must be an int or math.inf")
        elif value < 1:
            raise ValueError("total_tokens must be >= 1")

        if self._internal_limiter is not None:
            # Delegate so waiting tasks may be granted tokens on an increase
            self._limiter.total_tokens = value
        else:
            self._total_tokens = value

    @property
    def borrowed_tokens(self) -> int:
        limiter = self._internal_limiter
        if limiter is not None:
            return limiter.borrowed_tokens

        # Nothing can have been borrowed before the backend limiter exists
        return 0

    @property
    def available_tokens(self) -> float:
        limiter = self._internal_limiter
        if limiter is not None:
            return limiter.available_tokens

        return self._total_tokens

    def acquire_nowait(self) -> None:
        self._limiter.acquire_nowait()

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        self._limiter.acquire_on_behalf_of_nowait(borrower)

    async def acquire(self) -> None:
        await self._limiter.acquire()

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        await self._limiter.acquire_on_behalf_of(borrower)

    def release(self) -> None:
        self._limiter.release()

    def release_on_behalf_of(self, borrower: object) -> None:
        self._limiter.release_on_behalf_of(borrower)

    def statistics(self) -> CapacityLimiterStatistics:
        limiter = self._internal_limiter
        if limiter is not None:
            return limiter.statistics()

        return CapacityLimiterStatistics(
            borrowed_tokens=0,
            total_tokens=self.total_tokens,
            borrowers=(),
            tasks_waiting=0,
        )
|
||||
|
||||
|
||||
class ResourceGuard:
    """
    A context manager for ensuring that a resource is only used by a single task at a
    time.

    Entering this context manager while the previous has not exited it yet will trigger
    :exc:`BusyResourceError`.

    :param action: the action to guard against (visible in the :exc:`BusyResourceError`
        when triggered, e.g. "Another task is already {action} this resource")

    .. versionadded:: 4.1
    """

    # No per-instance __dict__: guards are created in large numbers
    __slots__ = ("action", "_guarded")

    def __init__(self, action: str = "using"):
        self.action: str = action
        self._guarded = False

    def __enter__(self) -> None:
        if not self._guarded:
            self._guarded = True
            return

        # A second task entered while the first one is still inside
        raise BusyResourceError(self.action)

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self._guarded = False
|
||||
@@ -0,0 +1,158 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
from collections.abc import Generator
|
||||
from contextlib import contextmanager
|
||||
from types import TracebackType
|
||||
|
||||
from ..abc._tasks import TaskGroup, TaskStatus
|
||||
from ._eventloop import get_async_backend
|
||||
|
||||
|
||||
class _IgnoredTaskStatus(TaskStatus[object]):
    # No-op TaskStatus: discards the started() notification and its value
    def started(self, value: object = None) -> None:
        pass


# Shared singleton passed as ``task_status`` when the caller does not care
# about the started() notification
TASK_STATUS_IGNORED = _IgnoredTaskStatus()
|
||||
|
||||
|
||||
class CancelScope:
    """
    Wraps a unit of work that can be made separately cancellable.

    :param deadline: The time (clock value) when this scope is cancelled automatically
    :param shield: ``True`` to shield the cancel scope from external cancellation
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        # Instantiating CancelScope always yields a backend-specific scope
        return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)

    def cancel(self) -> None:
        """Cancel this scope immediately."""
        raise NotImplementedError

    @property
    def deadline(self) -> float:
        """
        The time (clock value) when this scope is cancelled automatically.

        Will be ``float('inf')`` if no timeout has been set.

        """
        raise NotImplementedError

    @deadline.setter
    def deadline(self, value: float) -> None:
        raise NotImplementedError

    @property
    def cancel_called(self) -> bool:
        """``True`` if :meth:`cancel` has been called."""
        raise NotImplementedError

    @property
    def cancelled_caught(self) -> bool:
        """
        ``True`` if this scope suppressed a cancellation exception it itself raised.

        This is typically used to check if any work was interrupted, or to see if the
        scope was cancelled due to its deadline being reached. The value will, however,
        only be ``True`` if the cancellation was triggered by the scope itself (and not
        an outer scope).

        """
        raise NotImplementedError

    @property
    def shield(self) -> bool:
        """
        ``True`` if this scope is shielded from external cancellation.

        While a scope is shielded, it will not receive cancellations from outside.

        """
        raise NotImplementedError

    @shield.setter
    def shield(self, value: bool) -> None:
        raise NotImplementedError

    def __enter__(self) -> CancelScope:
        raise NotImplementedError

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool:
        raise NotImplementedError
|
||||
|
||||
|
||||
@contextmanager
def fail_after(
    delay: float | None, shield: bool = False
) -> Generator[CancelScope, None, None]:
    """
    Create a context manager which raises a :class:`TimeoutError` if does not finish in
    time.

    :param delay: maximum allowed time (in seconds) before raising the exception, or
        ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a context manager that yields a cancel scope
    :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]

    """
    current_time = get_async_backend().current_time
    if delay is None:
        deadline = math.inf
    else:
        deadline = current_time() + delay

    scope = get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
    with scope:
        yield scope

    # Only translate a self-inflicted, deadline-driven cancellation into
    # TimeoutError; a manual cancel() before the deadline stays silent.
    if scope.cancelled_caught and current_time() >= scope.deadline:
        raise TimeoutError
|
||||
|
||||
|
||||
def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
    """
    Create a cancel scope with a deadline that expires after the given delay.

    :param delay: maximum allowed time (in seconds) before exiting the context block, or
        ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    """
    if delay is None:
        deadline = math.inf
    else:
        deadline = get_async_backend().current_time() + delay

    return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
|
||||
|
||||
|
||||
def current_effective_deadline() -> float:
    """
    Return the nearest deadline among all the cancel scopes effective for the current
    task.

    :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
        there is no deadline in effect, or ``float('-inf')`` if the current scope has
        been cancelled)
    :rtype: float

    """
    # Pure delegation; the backend tracks the active scope stack
    return get_async_backend().current_effective_deadline()
|
||||
|
||||
|
||||
def create_task_group() -> TaskGroup:
    """
    Create a task group.

    :return: a task group

    """
    # Pure delegation to the currently running async backend
    return get_async_backend().create_task_group()
|
||||
@@ -0,0 +1,616 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
from collections.abc import Iterable
|
||||
from io import BytesIO, TextIOWrapper
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
AnyStr,
|
||||
Generic,
|
||||
overload,
|
||||
)
|
||||
|
||||
from .. import to_thread
|
||||
from .._core._fileio import AsyncFile
|
||||
from ..lowlevel import checkpoint_if_cancelled
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
|
||||
|
||||
|
||||
class TemporaryFile(Generic[AnyStr]):
    """
    An asynchronous temporary file that is automatically created and cleaned up.

    This class provides an asynchronous context manager interface to a temporary file.
    The file is created using Python's standard `tempfile.TemporaryFile` function in a
    background thread, and is wrapped as an asynchronous file using `AsyncFile`.

    :param mode: The mode in which the file is opened. Defaults to "w+b".
    :param buffering: The buffering policy (-1 means the default buffering).
    :param encoding: The encoding used to decode or encode the file. Only applicable in
        text mode.
    :param newline: Controls how universal newlines mode works (only applicable in text
        mode).
    :param suffix: The suffix for the temporary file name.
    :param prefix: The prefix for the temporary file name.
    :param dir: The directory in which the temporary file is created.
    :param errors: The error handling scheme used for encoding/decoding errors.
    """

    # Set in __aenter__; the wrapper handed to the caller
    _async_file: AsyncFile[AnyStr]

    @overload
    def __init__(
        self: TemporaryFile[bytes],
        mode: OpenBinaryMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        newline: str | None = ...,
        suffix: str | None = ...,
        prefix: str | None = ...,
        dir: str | None = ...,
        *,
        errors: str | None = ...,
    ): ...
    @overload
    def __init__(
        self: TemporaryFile[str],
        mode: OpenTextMode,
        buffering: int = ...,
        encoding: str | None = ...,
        newline: str | None = ...,
        suffix: str | None = ...,
        prefix: str | None = ...,
        dir: str | None = ...,
        *,
        errors: str | None = ...,
    ): ...

    def __init__(
        self,
        mode: OpenTextMode | OpenBinaryMode = "w+b",
        buffering: int = -1,
        encoding: str | None = None,
        newline: str | None = None,
        suffix: str | None = None,
        prefix: str | None = None,
        dir: str | None = None,
        *,
        errors: str | None = None,
    ) -> None:
        # Stash every option; the actual file is not created until __aenter__
        self.mode = mode
        self.buffering = buffering
        self.encoding = encoding
        self.newline = newline
        self.suffix: str | None = suffix
        self.prefix: str | None = prefix
        self.dir: str | None = dir
        self.errors = errors

    async def __aenter__(self) -> AsyncFile[AnyStr]:
        def create_tempfile() -> Any:
            # Runs in a worker thread: file creation does blocking disk I/O
            return tempfile.TemporaryFile(
                mode=self.mode,
                buffering=self.buffering,
                encoding=self.encoding,
                newline=self.newline,
                suffix=self.suffix,
                prefix=self.prefix,
                dir=self.dir,
                errors=self.errors,
            )

        raw_file = await to_thread.run_sync(create_tempfile)
        self._async_file = AsyncFile(raw_file)
        return self._async_file

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # Closing deletes the anonymous temporary file
        await self._async_file.aclose()
|
||||
|
||||
|
||||
class NamedTemporaryFile(Generic[AnyStr]):
    """
    An asynchronous named temporary file that is automatically created and cleaned up.

    This class provides an asynchronous context manager for a temporary file with a
    visible name in the file system. It uses Python's standard
    :func:`~tempfile.NamedTemporaryFile` function and wraps the file object with
    :class:`AsyncFile` for asynchronous operations.

    :param mode: The mode in which the file is opened. Defaults to "w+b".
    :param buffering: The buffering policy (-1 means the default buffering).
    :param encoding: The encoding used to decode or encode the file. Only applicable in
        text mode.
    :param newline: Controls how universal newlines mode works (only applicable in text
        mode).
    :param suffix: The suffix for the temporary file name.
    :param prefix: The prefix for the temporary file name.
    :param dir: The directory in which the temporary file is created.
    :param delete: Whether to delete the file when it is closed.
    :param errors: The error handling scheme used for encoding/decoding errors.
    :param delete_on_close: (Python 3.12+) Whether to delete the file on close.
    """

    # Assigned in __aenter__; holds the async wrapper for the lifetime of the context
    _async_file: AsyncFile[AnyStr]

    # Overload: binary mode (including the default "w+b") produces AsyncFile[bytes]
    @overload
    def __init__(
        self: NamedTemporaryFile[bytes],
        mode: OpenBinaryMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        newline: str | None = ...,
        suffix: str | None = ...,
        prefix: str | None = ...,
        dir: str | None = ...,
        delete: bool = ...,
        *,
        errors: str | None = ...,
        delete_on_close: bool = ...,
    ): ...
    # Overload: an explicit text mode produces AsyncFile[str]
    @overload
    def __init__(
        self: NamedTemporaryFile[str],
        mode: OpenTextMode,
        buffering: int = ...,
        encoding: str | None = ...,
        newline: str | None = ...,
        suffix: str | None = ...,
        prefix: str | None = ...,
        dir: str | None = ...,
        delete: bool = ...,
        *,
        errors: str | None = ...,
        delete_on_close: bool = ...,
    ): ...

    def __init__(
        self,
        mode: OpenBinaryMode | OpenTextMode = "w+b",
        buffering: int = -1,
        encoding: str | None = None,
        newline: str | None = None,
        suffix: str | None = None,
        prefix: str | None = None,
        dir: str | None = None,
        delete: bool = True,
        *,
        errors: str | None = None,
        delete_on_close: bool = True,
    ) -> None:
        # Keyword arguments for the lazily created tempfile.NamedTemporaryFile
        self._params: dict[str, Any] = {
            "mode": mode,
            "buffering": buffering,
            "encoding": encoding,
            "newline": newline,
            "suffix": suffix,
            "prefix": prefix,
            "dir": dir,
            "delete": delete,
            "errors": errors,
        }
        # tempfile.NamedTemporaryFile only accepts delete_on_close on Python 3.12+;
        # on older versions the argument is silently dropped
        if sys.version_info >= (3, 12):
            self._params["delete_on_close"] = delete_on_close

    async def __aenter__(self) -> AsyncFile[AnyStr]:
        """Create the named temporary file in a worker thread and wrap it."""
        fp = await to_thread.run_sync(
            lambda: tempfile.NamedTemporaryFile(**self._params)
        )
        self._async_file = AsyncFile(fp)
        return self._async_file

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        """Close the wrapped file (deleting it, if ``delete`` semantics apply)."""
        await self._async_file.aclose()
|
||||
|
||||
|
||||
class SpooledTemporaryFile(AsyncFile[AnyStr]):
    """
    An asynchronous spooled temporary file that starts in memory and is spooled to disk.

    This class provides an asynchronous interface to a spooled temporary file, much like
    Python's standard :class:`~tempfile.SpooledTemporaryFile`. It supports asynchronous
    write operations and provides a method to force a rollover to disk.

    :param max_size: Maximum size in bytes before the file is rolled over to disk.
    :param mode: The mode in which the file is opened. Defaults to "w+b".
    :param buffering: The buffering policy (-1 means the default buffering).
    :param encoding: The encoding used to decode or encode the file (text mode only).
    :param newline: Controls how universal newlines mode works (text mode only).
    :param suffix: The suffix for the temporary file name.
    :param prefix: The prefix for the temporary file name.
    :param dir: The directory in which the temporary file is created.
    :param errors: The error handling scheme used for encoding/decoding errors.
    """

    # True once the in-memory buffer has been replaced with an on-disk file
    _rolled: bool = False

    # Overload: binary mode (including the default "w+b") yields bytes
    @overload
    def __init__(
        self: SpooledTemporaryFile[bytes],
        max_size: int = ...,
        mode: OpenBinaryMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        newline: str | None = ...,
        suffix: str | None = ...,
        prefix: str | None = ...,
        dir: str | None = ...,
        *,
        errors: str | None = ...,
    ): ...
    # Overload: an explicit text mode yields str
    @overload
    def __init__(
        self: SpooledTemporaryFile[str],
        max_size: int = ...,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        newline: str | None = ...,
        suffix: str | None = ...,
        prefix: str | None = ...,
        dir: str | None = ...,
        *,
        errors: str | None = ...,
    ): ...

    def __init__(
        self,
        max_size: int = 0,
        mode: OpenBinaryMode | OpenTextMode = "w+b",
        buffering: int = -1,
        encoding: str | None = None,
        newline: str | None = None,
        suffix: str | None = None,
        prefix: str | None = None,
        dir: str | None = None,
        *,
        errors: str | None = None,
    ) -> None:
        # Keyword arguments for the on-disk tempfile.TemporaryFile created on rollover
        self._tempfile_params: dict[str, Any] = {
            "mode": mode,
            "buffering": buffering,
            "encoding": encoding,
            "newline": newline,
            "suffix": suffix,
            "prefix": prefix,
            "dir": dir,
            "errors": errors,
        }
        self._max_size = max_size
        # Start in memory: a raw BytesIO for binary modes, or a TextIOWrapper around
        # one for text modes (write_through so tell() reflects the true size)
        if "b" in mode:
            super().__init__(BytesIO())  # type: ignore[arg-type]
        else:
            super().__init__(
                TextIOWrapper(  # type: ignore[arg-type]
                    BytesIO(),
                    encoding=encoding,
                    errors=errors,
                    newline=newline,
                    write_through=True,
                )
            )

    async def aclose(self) -> None:
        """Close the file, synchronously if it still lives in memory."""
        if not self._rolled:
            # Closing an in-memory buffer cannot block, so skip the worker thread
            self._fp.close()
            return

        await super().aclose()

    async def _check(self) -> None:
        # Roll over to disk once the write position reaches the maximum size
        if self._rolled or self._fp.tell() < self._max_size:
            return

        await self.rollover()

    async def rollover(self) -> None:
        """Force the in-memory contents to be moved into a real temporary file."""
        if self._rolled:
            return

        # Set the flag first so the write() below goes through the async path
        self._rolled = True
        buffer = self._fp
        buffer.seek(0)
        self._fp = await to_thread.run_sync(
            lambda: tempfile.TemporaryFile(**self._tempfile_params)
        )
        await self.write(buffer.read())
        buffer.close()

    @property
    def closed(self) -> bool:
        # Whether the underlying file object (in-memory or on-disk) is closed
        return self._fp.closed

    async def read(self, size: int = -1) -> AnyStr:
        if not self._rolled:
            # In-memory reads cannot block; only honor a pending cancellation
            await checkpoint_if_cancelled()
            return self._fp.read(size)

        return await super().read(size)  # type: ignore[return-value]

    async def read1(self: SpooledTemporaryFile[bytes], size: int = -1) -> bytes:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.read1(size)

        return await super().read1(size)

    async def readline(self) -> AnyStr:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.readline()

        return await super().readline()  # type: ignore[return-value]

    async def readlines(self) -> list[AnyStr]:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.readlines()

        return await super().readlines()  # type: ignore[return-value]

    async def readinto(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
        if not self._rolled:
            await checkpoint_if_cancelled()
            # BUG FIX: previously this fell through to super().readinto(b),
            # reading the data a second time and discarding the first result
            return self._fp.readinto(b)

        return await super().readinto(b)

    async def readinto1(self: SpooledTemporaryFile[bytes], b: WriteableBuffer) -> int:
        if not self._rolled:
            await checkpoint_if_cancelled()
            # BUG FIX: previously this called readinto() instead of readinto1()
            # and fell through to a second read via super()
            return self._fp.readinto1(b)

        return await super().readinto1(b)

    async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.seek(offset, whence)

        return await super().seek(offset, whence)

    async def tell(self) -> int:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.tell()

        return await super().tell()

    async def truncate(self, size: int | None = None) -> int:
        if not self._rolled:
            await checkpoint_if_cancelled()
            return self._fp.truncate(size)

        return await super().truncate(size)

    @overload
    async def write(self: SpooledTemporaryFile[bytes], b: ReadableBuffer) -> int: ...
    @overload
    async def write(self: SpooledTemporaryFile[str], b: str) -> int: ...

    async def write(self, b: ReadableBuffer | str) -> int:
        """
        Asynchronously write data to the spooled temporary file.

        If the file has not yet been rolled over, the data is written synchronously,
        and a rollover is triggered if the size exceeds the maximum size.

        :param b: The data to write.
        :return: The number of bytes written.
        :raises RuntimeError: If the underlying file is not initialized.

        """
        if not self._rolled:
            await checkpoint_if_cancelled()
            result = self._fp.write(b)
            await self._check()
            return result

        return await super().write(b)  # type: ignore[misc]

    @overload
    async def writelines(
        self: SpooledTemporaryFile[bytes], lines: Iterable[ReadableBuffer]
    ) -> None: ...
    @overload
    async def writelines(
        self: SpooledTemporaryFile[str], lines: Iterable[str]
    ) -> None: ...

    async def writelines(self, lines: Iterable[str] | Iterable[ReadableBuffer]) -> None:
        """
        Asynchronously write a list of lines to the spooled temporary file.

        If the file has not yet been rolled over, the lines are written synchronously,
        and a rollover is triggered if the size exceeds the maximum size.

        :param lines: An iterable of lines to write.
        :raises RuntimeError: If the underlying file is not initialized.

        """
        if not self._rolled:
            await checkpoint_if_cancelled()
            result = self._fp.writelines(lines)
            await self._check()
            return result

        return await super().writelines(lines)  # type: ignore[misc]
|
||||
|
||||
|
||||
class TemporaryDirectory(Generic[AnyStr]):
    """
    An asynchronous temporary directory that is created and cleaned up automatically.

    This class provides an asynchronous context manager for creating a temporary
    directory. It wraps Python's standard :class:`~tempfile.TemporaryDirectory` to
    perform directory creation and cleanup operations in a background thread.

    :param suffix: Suffix to be added to the temporary directory name.
    :param prefix: Prefix to be added to the temporary directory name.
    :param dir: The parent directory where the temporary directory is created.
    :param ignore_cleanup_errors: Whether to ignore errors during cleanup
        (Python 3.10+).
    :param delete: Whether to delete the directory upon closing (Python 3.12+).
    """

    def __init__(
        self,
        suffix: AnyStr | None = None,
        prefix: AnyStr | None = None,
        dir: AnyStr | None = None,
        *,
        ignore_cleanup_errors: bool = False,
        delete: bool = True,
    ) -> None:
        self.suffix: AnyStr | None = suffix
        self.prefix: AnyStr | None = prefix
        self.dir: AnyStr | None = dir
        self.ignore_cleanup_errors = ignore_cleanup_errors
        self.delete = delete

        # The wrapped context manager; created lazily in __aenter__
        self._tempdir: tempfile.TemporaryDirectory | None = None

    async def __aenter__(self) -> str:
        """Create the directory in a worker thread and return its path."""
        params: dict[str, Any] = {
            "suffix": self.suffix,
            "prefix": self.prefix,
            "dir": self.dir,
        }
        # These keyword arguments only exist on newer Pythons; on older versions
        # the corresponding options are silently dropped
        if sys.version_info >= (3, 10):
            params["ignore_cleanup_errors"] = self.ignore_cleanup_errors

        if sys.version_info >= (3, 12):
            params["delete"] = self.delete

        self._tempdir = await to_thread.run_sync(
            lambda: tempfile.TemporaryDirectory(**params)
        )
        return await to_thread.run_sync(self._tempdir.__enter__)

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        """Delegate to the wrapped context manager's exit (cleanup) in a worker thread."""
        if self._tempdir is not None:
            await to_thread.run_sync(
                self._tempdir.__exit__, exc_type, exc_value, traceback
            )

    async def cleanup(self) -> None:
        """Explicitly remove the temporary directory tree in a worker thread."""
        if self._tempdir is not None:
            await to_thread.run_sync(self._tempdir.cleanup)
|
||||
|
||||
|
||||
# Overload: str arguments yield a str file name
@overload
async def mkstemp(
    suffix: str | None = None,
    prefix: str | None = None,
    dir: str | None = None,
    text: bool = False,
) -> tuple[int, str]: ...


# Overload: bytes arguments yield a bytes file name
@overload
async def mkstemp(
    suffix: bytes | None = None,
    prefix: bytes | None = None,
    dir: bytes | None = None,
    text: bool = False,
) -> tuple[int, bytes]: ...


async def mkstemp(
    suffix: AnyStr | None = None,
    prefix: AnyStr | None = None,
    dir: AnyStr | None = None,
    text: bool = False,
) -> tuple[int, str | bytes]:
    """
    Asynchronously create a temporary file and return an OS-level handle and the file
    name.

    This function wraps `tempfile.mkstemp` and executes it in a background thread.

    :param suffix: Suffix to be added to the file name.
    :param prefix: Prefix to be added to the file name.
    :param dir: Directory in which the temporary file is created.
    :param text: Whether the file is opened in text mode.
    :return: A tuple containing the file descriptor and the file name.

    """
    # The arguments are forwarded positionally to tempfile.mkstemp in a worker thread
    return await to_thread.run_sync(tempfile.mkstemp, suffix, prefix, dir, text)
|
||||
|
||||
|
||||
# Overload: str arguments yield a str directory path
@overload
async def mkdtemp(
    suffix: str | None = None,
    prefix: str | None = None,
    dir: str | None = None,
) -> str: ...


# Overload: bytes arguments yield a bytes directory path
@overload
async def mkdtemp(
    suffix: bytes | None = None,
    prefix: bytes | None = None,
    dir: bytes | None = None,
) -> bytes: ...


async def mkdtemp(
    suffix: AnyStr | None = None,
    prefix: AnyStr | None = None,
    dir: AnyStr | None = None,
) -> str | bytes:
    """
    Asynchronously create a temporary directory and return its path.

    This function wraps `tempfile.mkdtemp` and executes it in a background thread.

    :param suffix: Suffix to be added to the directory name.
    :param prefix: Prefix to be added to the directory name.
    :param dir: Parent directory where the temporary directory is created.
    :return: The path of the created temporary directory.

    """
    # The arguments are forwarded positionally to tempfile.mkdtemp in a worker thread
    return await to_thread.run_sync(tempfile.mkdtemp, suffix, prefix, dir)
|
||||
|
||||
|
||||
async def gettempdir() -> str:
    """
    Asynchronously return the name of the directory used for temporary files.

    This function wraps `tempfile.gettempdir` and executes it in a background thread.

    :return: The path of the temporary directory as a string.

    """
    # The first call may probe the filesystem, so run it in a worker thread
    tempdir = await to_thread.run_sync(tempfile.gettempdir)
    return tempdir
|
||||
|
||||
|
||||
async def gettempdirb() -> bytes:
    """
    Asynchronously return the name of the directory used for temporary files in bytes.

    This function wraps `tempfile.gettempdirb` and executes it in a background thread.

    :return: The path of the temporary directory as bytes.

    """
    # The first call may probe the filesystem, so run it in a worker thread
    tempdir = await to_thread.run_sync(tempfile.gettempdirb)
    return tempdir
|
||||
@@ -0,0 +1,78 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Awaitable, Generator
|
||||
from typing import Any, cast
|
||||
|
||||
from ._eventloop import get_async_backend
|
||||
|
||||
|
||||
class TaskInfo:
    """
    Represents an asynchronous task.

    :ivar int id: the unique identifier of the task
    :ivar parent_id: the identifier of the parent task, if any
    :vartype parent_id: Optional[int]
    :ivar str name: the description of the task (if any)
    :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
    """

    __slots__ = "_name", "id", "parent_id", "name", "coro"

    def __init__(
        self,
        id: int,
        parent_id: int | None,
        name: str | None,
        coro: Generator[Any, Any, Any] | Awaitable[Any],
    ):
        # Records the dotted path of the module-level get_current_task function.
        # NOTE(review): _name is not consumed anywhere in this class; presumably
        # used by machinery elsewhere (e.g. messages) — confirm before removing.
        func = get_current_task
        self._name = f"{func.__module__}.{func.__qualname__}"
        self.id: int = id
        self.parent_id: int | None = parent_id
        self.name: str | None = name
        self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro

    def __eq__(self, other: object) -> bool:
        # Two TaskInfo objects are equal iff they refer to the same task ID;
        # comparisons against other types defer to the other operand
        if isinstance(other, TaskInfo):
            return self.id == other.id

        return NotImplemented

    def __hash__(self) -> int:
        # Hash on the ID only, consistent with __eq__
        return hash(self.id)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"

    def has_pending_cancellation(self) -> bool:
        """
        Return ``True`` if the task has a cancellation pending, ``False`` otherwise.

        """
        # Default implementation always reports no pending cancellation;
        # NOTE(review): presumably overridden by backend-specific subclasses — confirm.
        return False
|
||||
|
||||
|
||||
def get_current_task() -> TaskInfo:
    """
    Return the current task.

    :return: a representation of the current task

    """
    backend = get_async_backend()
    return backend.get_current_task()
|
||||
|
||||
|
||||
def get_running_tasks() -> list[TaskInfo]:
    """
    Return a list of running tasks in the current event loop.

    :return: a list of task info objects

    """
    tasks = get_async_backend().get_running_tasks()
    return cast("list[TaskInfo]", tasks)
|
||||
|
||||
|
||||
async def wait_all_tasks_blocked() -> None:
    """Wait until all other tasks are waiting for something."""
    backend = get_async_backend()
    await backend.wait_all_tasks_blocked()
|
||||
@@ -0,0 +1,81 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Callable, Mapping
|
||||
from typing import Any, TypeVar, final, overload
|
||||
|
||||
from ._exceptions import TypedAttributeLookupError
|
||||
|
||||
T_Attr = TypeVar("T_Attr")
|
||||
T_Default = TypeVar("T_Default")
|
||||
undefined = object()
|
||||
|
||||
|
||||
def typed_attribute() -> Any:
    """Return a unique object, used to mark typed attributes."""
    marker = object()
    return marker
|
||||
|
||||
|
||||
class TypedAttributeSet:
    """
    Superclass for typed attribute collections.

    Checks that every public attribute of every subclass has a type annotation.
    """

    def __init_subclass__(cls) -> None:
        # Collect public attributes that lack a type annotation; raise on the
        # first offender (dir() yields them in sorted order)
        declared: dict[str, Any] = getattr(cls, "__annotations__", {})
        offenders = [
            name
            for name in dir(cls)
            if not name.startswith("_") and name not in declared
        ]
        if offenders:
            raise TypeError(
                f"Attribute {offenders[0]!r} is missing its type annotation"
            )

        super().__init_subclass__()
|
||||
|
||||
|
||||
class TypedAttributeProvider:
    """Base class for classes that wish to provide typed extra attributes."""

    @property
    def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
        """
        A mapping of typed attributes to zero-argument callables that produce their
        current values.

        A provider that wraps another provider should merge the wrapped provider's
        attributes into its own mapping (and may replace individual callables from
        the wrapped instance).

        """
        # Base providers expose nothing by default
        return {}

    @overload
    def extra(self, attribute: T_Attr) -> T_Attr: ...

    @overload
    def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...

    @final
    def extra(self, attribute: Any, default: object = undefined) -> object:
        """
        extra(attribute, default=undefined)

        Return the value of the given typed extra attribute.

        :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
            look for
        :param default: the value that should be returned if no value is found for the
            attribute
        :raises ~anyio.TypedAttributeLookupError: if the search failed and no default
            value was given

        """
        try:
            getter = self.extra_attributes[attribute]
        except KeyError:
            if default is not undefined:
                return default

            raise TypedAttributeLookupError("Attribute not found") from None

        # The mapping stores callables, not values, so fetch the value lazily
        return getter()
|
||||
@@ -0,0 +1,55 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from ._eventloop import AsyncBackend as AsyncBackend
|
||||
from ._resources import AsyncResource as AsyncResource
|
||||
from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket
|
||||
from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket
|
||||
from ._sockets import IPAddressType as IPAddressType
|
||||
from ._sockets import IPSockAddrType as IPSockAddrType
|
||||
from ._sockets import SocketAttribute as SocketAttribute
|
||||
from ._sockets import SocketListener as SocketListener
|
||||
from ._sockets import SocketStream as SocketStream
|
||||
from ._sockets import UDPPacketType as UDPPacketType
|
||||
from ._sockets import UDPSocket as UDPSocket
|
||||
from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType
|
||||
from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket
|
||||
from ._sockets import UNIXSocketStream as UNIXSocketStream
|
||||
from ._streams import AnyByteReceiveStream as AnyByteReceiveStream
|
||||
from ._streams import AnyByteSendStream as AnyByteSendStream
|
||||
from ._streams import AnyByteStream as AnyByteStream
|
||||
from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream
|
||||
from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream
|
||||
from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream
|
||||
from ._streams import ByteReceiveStream as ByteReceiveStream
|
||||
from ._streams import ByteSendStream as ByteSendStream
|
||||
from ._streams import ByteStream as ByteStream
|
||||
from ._streams import Listener as Listener
|
||||
from ._streams import ObjectReceiveStream as ObjectReceiveStream
|
||||
from ._streams import ObjectSendStream as ObjectSendStream
|
||||
from ._streams import ObjectStream as ObjectStream
|
||||
from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream
|
||||
from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream
|
||||
from ._streams import UnreliableObjectStream as UnreliableObjectStream
|
||||
from ._subprocesses import Process as Process
|
||||
from ._tasks import TaskGroup as TaskGroup
|
||||
from ._tasks import TaskStatus as TaskStatus
|
||||
from ._testing import TestRunner as TestRunner
|
||||
|
||||
# Re-exported here, for backwards compatibility
|
||||
# isort: off
|
||||
from .._core._synchronization import (
|
||||
CapacityLimiter as CapacityLimiter,
|
||||
Condition as Condition,
|
||||
Event as Event,
|
||||
Lock as Lock,
|
||||
Semaphore as Semaphore,
|
||||
)
|
||||
from .._core._tasks import CancelScope as CancelScope
|
||||
from ..from_thread import BlockingPortal as BlockingPortal
|
||||
|
||||
# Re-export imports so they look like they live directly in this package
# Rewriting __module__ makes reprs, documentation tools, and pickling refer to
# these names via this public package instead of the private submodules above.
for __value in list(locals().values()):
    # Only touch objects that originate from the private "anyio.abc." submodules
    if getattr(__value, "__module__", "").startswith("anyio.abc."):
        __value.__module__ = __name__

# Remove the loop variable so it does not leak into the package namespace
del __value
|
||||
@@ -0,0 +1,376 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import sys
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
|
||||
from contextlib import AbstractContextManager
|
||||
from os import PathLike
|
||||
from signal import Signals
|
||||
from socket import AddressFamily, SocketKind, socket
|
||||
from typing import (
|
||||
IO,
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
TypeVar,
|
||||
Union,
|
||||
overload,
|
||||
)
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from typing import TypeAlias
|
||||
else:
|
||||
from typing_extensions import TypeAlias
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from _typeshed import HasFileno
|
||||
|
||||
from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore
|
||||
from .._core._tasks import CancelScope
|
||||
from .._core._testing import TaskInfo
|
||||
from ..from_thread import BlockingPortal
|
||||
from ._sockets import (
|
||||
ConnectedUDPSocket,
|
||||
ConnectedUNIXDatagramSocket,
|
||||
IPSockAddrType,
|
||||
SocketListener,
|
||||
SocketStream,
|
||||
UDPSocket,
|
||||
UNIXDatagramSocket,
|
||||
UNIXSocketStream,
|
||||
)
|
||||
from ._subprocesses import Process
|
||||
from ._tasks import TaskGroup
|
||||
from ._testing import TestRunner
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
PosArgsT = TypeVarTuple("PosArgsT")
|
||||
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
|
||||
|
||||
|
||||
class AsyncBackend(metaclass=ABCMeta):
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def run(
|
||||
cls,
|
||||
func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
|
||||
args: tuple[Unpack[PosArgsT]],
|
||||
kwargs: dict[str, Any],
|
||||
options: dict[str, Any],
|
||||
) -> T_Retval:
|
||||
"""
|
||||
Run the given coroutine function in an asynchronous event loop.
|
||||
|
||||
The current thread must not be already running an event loop.
|
||||
|
||||
:param func: a coroutine function
|
||||
:param args: positional arguments to ``func``
|
||||
:param kwargs: positional arguments to ``func``
|
||||
:param options: keyword arguments to call the backend ``run()`` implementation
|
||||
with
|
||||
:return: the return value of the coroutine function
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def current_token(cls) -> object:
|
||||
"""
|
||||
|
||||
:return:
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def current_time(cls) -> float:
|
||||
"""
|
||||
Return the current value of the event loop's internal clock.
|
||||
|
||||
:return: the clock value (seconds)
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def cancelled_exception_class(cls) -> type[BaseException]:
|
||||
"""Return the exception class that is raised in a task if it's cancelled."""
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
async def checkpoint(cls) -> None:
|
||||
"""
|
||||
Check if the task has been cancelled, and allow rescheduling of other tasks.
|
||||
|
||||
This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
|
||||
:meth:`cancel_shielded_checkpoint`.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
async def checkpoint_if_cancelled(cls) -> None:
|
||||
"""
|
||||
Check if the current task group has been cancelled.
|
||||
|
||||
This will check if the task has been cancelled, but will not allow other tasks
|
||||
to be scheduled if not.
|
||||
|
||||
"""
|
||||
if cls.current_effective_deadline() == -math.inf:
|
||||
await cls.checkpoint()
|
||||
|
||||
@classmethod
|
||||
async def cancel_shielded_checkpoint(cls) -> None:
|
||||
"""
|
||||
Allow the rescheduling of other tasks.
|
||||
|
||||
This will give other tasks the opportunity to run, but without checking if the
|
||||
current task group has been cancelled, unlike with :meth:`checkpoint`.
|
||||
|
||||
"""
|
||||
with cls.create_cancel_scope(shield=True):
|
||||
await cls.sleep(0)
|
||||
|
||||
    @classmethod
    @abstractmethod
    async def sleep(cls, delay: float) -> None:
        """
        Pause the current task for the specified duration.

        :param delay: the duration, in seconds
        """

    @classmethod
    @abstractmethod
    def create_cancel_scope(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        """
        Create a new cancel scope.

        :param deadline: the time (a value on the event loop's internal clock; see
            :meth:`current_effective_deadline`) after which the scope is cancelled;
            ``math.inf`` means no deadline
        :param shield: if ``True``, shield the scope's contents from outside
            cancellation (used by :meth:`cancel_shielded_checkpoint`)
        :return: the new cancel scope
        """
        pass

    @classmethod
    @abstractmethod
    def current_effective_deadline(cls) -> float:
        """
        Return the nearest deadline among all the cancel scopes effective for the
        current task.

        :return:
            - a clock value from the event loop's internal clock
            - ``inf`` if there is no deadline in effect
            - ``-inf`` if the current scope has been cancelled
        :rtype: float
        """

    @classmethod
    @abstractmethod
    def create_task_group(cls) -> TaskGroup:
        """Create a new task group for this backend."""
        pass

    @classmethod
    @abstractmethod
    def create_event(cls) -> Event:
        """Create a new event object for this backend."""
        pass

    @classmethod
    @abstractmethod
    def create_lock(cls, *, fast_acquire: bool) -> Lock:
        """
        Create a new lock for this backend.

        :param fast_acquire: backend-specific fast-path toggle for acquisition
            (see the concrete backend for exact semantics)
        """
        pass

    @classmethod
    @abstractmethod
    def create_semaphore(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        """
        Create a new semaphore for this backend.

        :param initial_value: the semaphore's initial value
        :param max_value: optional upper bound for the semaphore's value
        :param fast_acquire: backend-specific fast-path toggle for acquisition
        """
        pass

    @classmethod
    @abstractmethod
    def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
        """
        Create a new capacity limiter for this backend.

        :param total_tokens: the total number of tokens available for borrowing
        """
        pass
|
||||
|
||||
    @classmethod
    @abstractmethod
    async def run_sync_in_worker_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        abandon_on_cancel: bool = False,
        limiter: CapacityLimiter | None = None,
    ) -> T_Retval:
        """
        Run the given callable in a worker thread.

        :param func: the callable to run
        :param args: positional arguments to call *func* with
        :param abandon_on_cancel: if ``True``, presumably the thread is abandoned
            (left running) when the host task is cancelled — confirm against the
            concrete backends
        :param limiter: optional capacity limiter bounding concurrent worker threads
        :return: the return value of *func*
        """
        pass

    @classmethod
    @abstractmethod
    def check_cancelled(cls) -> None:
        """
        Check for a pending cancellation of the host task (intended to be called
        from a worker thread; exact semantics are backend-specific).
        """
        pass

    @classmethod
    @abstractmethod
    def run_async_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        """
        Run the given coroutine function in the event loop, from a foreign thread.

        :param func: a coroutine function
        :param args: positional arguments to call *func* with
        :param token: backend-specific token identifying the target event loop
        :return: the return value of the coroutine
        """
        pass

    @classmethod
    @abstractmethod
    def run_sync_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        """
        Run the given callable in the event loop thread, from a foreign thread.

        :param func: a callable
        :param args: positional arguments to call *func* with
        :param token: backend-specific token identifying the target event loop
        :return: the return value of the callable
        """
        pass

    @classmethod
    @abstractmethod
    def create_blocking_portal(cls) -> BlockingPortal:
        """Create a blocking portal bound to this backend."""
        pass
|
||||
|
||||
    @classmethod
    @abstractmethod
    async def open_process(
        cls,
        command: StrOrBytesPath | Sequence[StrOrBytesPath],
        *,
        stdin: int | IO[Any] | None,
        stdout: int | IO[Any] | None,
        stderr: int | IO[Any] | None,
        **kwargs: Any,
    ) -> Process:
        """
        Start a subprocess.

        :param command: either a program path or a sequence of the program path plus
            its arguments
        :param stdin: source for the process's standard input
        :param stdout: destination for the process's standard output
        :param stderr: destination for the process's standard error output
        :param kwargs: additional, backend-specific keyword arguments
        :return: a :class:`Process` wrapping the started subprocess
        """
        pass

    @classmethod
    @abstractmethod
    def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
        """
        Arrange for the given worker processes to be cleaned up when the event loop
        shuts down (mechanics are backend-specific).

        :param workers: the set of worker processes to clean up at shutdown
        """
        pass
|
||||
|
||||
    @classmethod
    @abstractmethod
    async def connect_tcp(
        cls, host: str, port: int, local_address: IPSockAddrType | None = None
    ) -> SocketStream:
        """
        Connect to a TCP endpoint.

        :param host: the host to connect to
        :param port: the port to connect to
        :param local_address: optional local (address, port) to bind to before
            connecting
        :return: a socket stream connected to the peer
        """
        pass

    @classmethod
    @abstractmethod
    async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
        """
        Connect to a UNIX domain socket.

        :param path: filesystem path of the socket
        :return: a socket stream connected to the peer
        """
        pass

    @classmethod
    @abstractmethod
    def create_tcp_listener(cls, sock: socket) -> SocketListener:
        """
        Wrap a stdlib TCP socket (expected to be already set up by the caller) in a
        :class:`SocketListener`.

        :param sock: the stdlib socket to wrap
        """
        pass

    @classmethod
    @abstractmethod
    def create_unix_listener(cls, sock: socket) -> SocketListener:
        """
        Wrap a stdlib UNIX socket (expected to be already set up by the caller) in a
        :class:`SocketListener`.

        :param sock: the stdlib socket to wrap
        """
        pass

    @classmethod
    @abstractmethod
    async def create_udp_socket(
        cls,
        family: AddressFamily,
        local_address: IPSockAddrType | None,
        remote_address: IPSockAddrType | None,
        reuse_port: bool,
    ) -> UDPSocket | ConnectedUDPSocket:
        """
        Create a UDP socket.

        :param family: the address family to use
        :param local_address: local (address, port) to bind to, if any
        :param remote_address: if given, the socket is connected to this
            (address, port) and a :class:`ConnectedUDPSocket` is returned; otherwise
            a :class:`UDPSocket` is returned
        :param reuse_port: whether to enable port reuse on the socket
        """
        pass

    # The two @overload declarations below exist only for static typing: the return
    # type depends on whether remote_path is None. Only the last definition runs.
    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: None
    ) -> UNIXDatagramSocket: ...

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes
    ) -> ConnectedUNIXDatagramSocket: ...

    @classmethod
    @abstractmethod
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes | None
    ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
        """
        Wrap a stdlib UNIX datagram socket.

        :param raw_socket: the stdlib socket to wrap
        :param remote_path: if given, connect the socket to this path and return a
            :class:`ConnectedUNIXDatagramSocket`; otherwise return a
            :class:`UNIXDatagramSocket`
        """
        pass
|
||||
|
||||
    @classmethod
    @abstractmethod
    async def getaddrinfo(
        cls,
        host: bytes | str | None,
        port: str | int | None,
        *,
        family: int | AddressFamily = 0,
        type: int | SocketKind = 0,
        proto: int = 0,
        flags: int = 0,
    ) -> Sequence[
        tuple[
            AddressFamily,
            SocketKind,
            int,
            str,
            tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes],
        ]
    ]:
        """
        Resolve a host/port pair into a sequence of socket address 5-tuples
        (async analog of :func:`socket.getaddrinfo`; parameters have the same
        meaning as there).
        """
        pass

    @classmethod
    @abstractmethod
    async def getnameinfo(
        cls, sockaddr: IPSockAddrType, flags: int = 0
    ) -> tuple[str, str]:
        """
        Resolve a socket address into a ``(host, port)`` pair of strings
        (async analog of :func:`socket.getnameinfo`).
        """
        pass
|
||||
|
||||
    @classmethod
    @abstractmethod
    async def wait_readable(cls, obj: HasFileno | int) -> None:
        """
        Wait until the given object (a file descriptor, or an object with a
        ``fileno()`` method) becomes readable.
        """
        pass

    @classmethod
    @abstractmethod
    async def wait_writable(cls, obj: HasFileno | int) -> None:
        """
        Wait until the given object (a file descriptor, or an object with a
        ``fileno()`` method) becomes writable.
        """
        pass

    @classmethod
    @abstractmethod
    def current_default_thread_limiter(cls) -> CapacityLimiter:
        """Return the default capacity limiter used for worker threads."""
        pass

    @classmethod
    @abstractmethod
    def open_signal_receiver(
        cls, *signals: Signals
    ) -> AbstractContextManager[AsyncIterator[Signals]]:
        """
        Return a context manager that yields an async iterator of received signals.

        :param signals: the signal numbers to listen for
        """
        pass

    @classmethod
    @abstractmethod
    def get_current_task(cls) -> TaskInfo:
        """Return information about the currently running task."""
        pass

    @classmethod
    @abstractmethod
    def get_running_tasks(cls) -> Sequence[TaskInfo]:
        """Return information about all running tasks in the event loop."""
        pass

    @classmethod
    @abstractmethod
    async def wait_all_tasks_blocked(cls) -> None:
        """Wait until all other tasks are waiting for something (testing helper)."""
        pass

    @classmethod
    @abstractmethod
    def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
        """
        Create a :class:`TestRunner` for this backend.

        :param options: backend-specific options
        """
        pass
|
||||
@@ -0,0 +1,33 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from types import TracebackType
|
||||
from typing import TypeVar
|
||||
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
class AsyncResource(metaclass=ABCMeta):
    """
    Base class for all closeable asynchronous resources.

    Instances act as asynchronous context managers: entering the context hands
    back the resource itself, and leaving it awaits :meth:`aclose`.
    """

    # No per-instance __dict__; subclasses declare their own slots/attributes.
    __slots__ = ()

    @abstractmethod
    async def aclose(self) -> None:
        """Close the resource."""

    async def __aenter__(self: T) -> T:
        # Entering the context just exposes the resource unchanged.
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        # Close unconditionally, whether or not the body raised.
        await self.aclose()
|
||||
@@ -0,0 +1,194 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import socket
|
||||
from abc import abstractmethod
|
||||
from collections.abc import Callable, Collection, Mapping
|
||||
from contextlib import AsyncExitStack
|
||||
from io import IOBase
|
||||
from ipaddress import IPv4Address, IPv6Address
|
||||
from socket import AddressFamily
|
||||
from types import TracebackType
|
||||
from typing import Any, TypeVar, Union
|
||||
|
||||
from .._core._typedattr import (
|
||||
TypedAttributeProvider,
|
||||
TypedAttributeSet,
|
||||
typed_attribute,
|
||||
)
|
||||
from ._streams import ByteStream, Listener, UnreliableObjectStream
|
||||
from ._tasks import TaskGroup
|
||||
|
||||
IPAddressType = Union[str, IPv4Address, IPv6Address]
|
||||
IPSockAddrType = tuple[str, int]
|
||||
SockAddrType = Union[IPSockAddrType, str]
|
||||
UDPPacketType = tuple[bytes, IPSockAddrType]
|
||||
UNIXDatagramPacketType = tuple[bytes, str]
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
|
||||
|
||||
class _NullAsyncContextManager:
|
||||
async def __aenter__(self) -> None:
|
||||
pass
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: TracebackType | None,
|
||||
) -> bool | None:
|
||||
return None
|
||||
|
||||
|
||||
class SocketAttribute(TypedAttributeSet):
    """Typed attributes exposed by socket-backed streams, listeners and sockets."""

    #: the address family of the underlying socket
    family: AddressFamily = typed_attribute()
    #: the local socket address of the underlying socket
    local_address: SockAddrType = typed_attribute()
    #: for IP addresses, the local port the underlying socket is bound to
    local_port: int = typed_attribute()
    #: the underlying stdlib socket object
    raw_socket: socket.socket = typed_attribute()
    #: the remote address the underlying socket is connected to
    remote_address: SockAddrType = typed_attribute()
    #: for IP addresses, the remote port the underlying socket is connected to
    remote_port: int = typed_attribute()
|
||||
|
||||
|
||||
class _SocketProvider(TypedAttributeProvider):
    """Mixin that derives :class:`SocketAttribute` values from ``_raw_socket``."""

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        from .._core._sockets import convert_ipv6_sockaddr as convert

        attributes: dict[Any, Callable[[], Any]] = {
            SocketAttribute.family: lambda: self._raw_socket.family,
            SocketAttribute.local_address: lambda: convert(
                self._raw_socket.getsockname()
            ),
            SocketAttribute.raw_socket: lambda: self._raw_socket,
        }
        try:
            # getpeername() raises OSError when the socket is not connected
            peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
        except OSError:
            peername = None

        # Provide the remote address for connected sockets
        if peername is not None:
            attributes[SocketAttribute.remote_address] = lambda: peername

        # Provide local and remote ports for IP based sockets
        if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
            attributes[SocketAttribute.local_port] = (
                lambda: self._raw_socket.getsockname()[1]
            )
            if peername is not None:
                # Capture the port in a local so the lambda is independent of any
                # later rebinding of `peername`
                remote_port = peername[1]
                attributes[SocketAttribute.remote_port] = lambda: remote_port

        return attributes

    @property
    @abstractmethod
    def _raw_socket(self) -> socket.socket:
        # The underlying stdlib socket; must be supplied by the subclass.
        pass
|
||||
|
||||
|
||||
# Marker interface: a ByteStream whose bytes travel over a socket.
class SocketStream(ByteStream, _SocketProvider):
    """
    Transports bytes over a socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """
|
||||
|
||||
|
||||
class UNIXSocketStream(SocketStream):
    """A :class:`SocketStream` over a UNIX domain socket that can also pass file
    descriptors to and from the peer."""

    @abstractmethod
    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """
        Send file descriptors along with a message to the peer.

        :param message: a non-empty bytestring
        :param fds: a collection of files (either numeric file descriptors or open file
            or socket objects)
        """

    @abstractmethod
    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """
        Receive file descriptors along with a message from the peer.

        :param msglen: length of the message to expect from the peer
        :param maxfds: maximum number of file descriptors to expect from the peer
        :return: a tuple of (message, file descriptors)
        """
|
||||
|
||||
|
||||
class SocketListener(Listener[SocketStream], _SocketProvider):
    """
    Listens to incoming socket connections.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    @abstractmethod
    async def accept(self) -> SocketStream:
        """Accept an incoming connection."""

    async def serve(
        self,
        handler: Callable[[SocketStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        """
        Accept incoming connections as they come in and start tasks to handle them.

        :param handler: a callable that will be used to handle each accepted connection
        :param task_group: the task group that will be used to start tasks for handling
            each accepted connection (if omitted, an ad-hoc task group will be created)
        """
        from .. import create_task_group

        async with AsyncExitStack() as stack:
            if task_group is None:
                # Only the ad-hoc task group goes on the exit stack; a caller-supplied
                # group is owned (and exited) by the caller.
                task_group = await stack.enter_async_context(create_task_group())

            # Loops until cancelled from the outside (e.g. via a cancel scope)
            while True:
                stream = await self.accept()
                task_group.start_soon(handler, stream)
|
||||
|
||||
|
||||
class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
    """
    Represents an unconnected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, host: str, port: int) -> None:
        """
        Send ``data`` to the given destination.

        Alias for :meth:`~.UnreliableObjectSendStream.send` with
        ``(data, (host, port))``.

        :param data: the payload to send
        :param host: destination host
        :param port: destination port
        """
        return await self.send((data, (host, port)))
|
||||
|
||||
|
||||
class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """
|
||||
|
||||
|
||||
class UNIXDatagramSocket(
    UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
):
    """
    Represents an unconnected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, path: str) -> None:
        """
        Send ``data`` to the socket at the given path.

        Alias for :meth:`~.UnreliableObjectSendStream.send` with ``(data, path)``.

        :param data: the payload to send
        :param path: filesystem path of the destination socket
        """
        return await self.send((data, path))
|
||||
|
||||
|
||||
class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """
|
||||
@@ -0,0 +1,203 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import abstractmethod
|
||||
from collections.abc import Callable
|
||||
from typing import Any, Generic, TypeVar, Union
|
||||
|
||||
from .._core._exceptions import EndOfStream
|
||||
from .._core._typedattr import TypedAttributeProvider
|
||||
from ._resources import AsyncResource
|
||||
from ._tasks import TaskGroup
|
||||
|
||||
T_Item = TypeVar("T_Item")
|
||||
T_co = TypeVar("T_co", covariant=True)
|
||||
T_contra = TypeVar("T_contra", contravariant=True)
|
||||
|
||||
|
||||
class UnreliableObjectReceiveStream(
    Generic[T_co], AsyncResource, TypedAttributeProvider
):
    """
    An interface for receiving objects.

    This interface makes no guarantees that the received messages arrive in the order in
    which they were sent, or that no messages are missed.

    Asynchronously iterating over objects of this type will yield objects matching the
    given type parameter.
    """

    def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
        return self

    async def __anext__(self) -> T_co:
        try:
            return await self.receive()
        except EndOfStream:
            # Translate end-of-stream into the async-iterator protocol's terminator
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self) -> T_co:
        """
        Receive the next item.

        :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
            closed
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """
|
||||
|
||||
|
||||
class UnreliableObjectSendStream(
    Generic[T_contra], AsyncResource, TypedAttributeProvider
):
    """
    An interface for sending objects.

    This interface makes no guarantees that the messages sent will reach the
    recipient(s) in the same order in which they were sent, or at all.
    """

    @abstractmethod
    async def send(self, item: T_contra) -> None:
        """
        Send an item to the peer(s).

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if the send stream has been explicitly
            closed
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """
|
||||
|
||||
|
||||
# Marker class combining both unreliable directions; adds no members of its own.
class UnreliableObjectStream(
    UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
):
    """
    A bidirectional message stream which does not guarantee the order or reliability of
    message delivery.
    """
|
||||
|
||||
|
||||
# Marker class: strengthens the contract to ordered, lossless delivery.
class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
    """
    A receive message stream which guarantees that messages are received in the same
    order in which they were sent, and that no messages are missed.
    """
|
||||
|
||||
|
||||
# Marker class: strengthens the contract to ordered, lossless delivery.
class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
    """
    A send message stream which guarantees that messages are delivered in the same order
    in which they were sent, without missing any messages in the middle.
    """
|
||||
|
||||
|
||||
class ObjectStream(
    ObjectReceiveStream[T_Item],
    ObjectSendStream[T_Item],
    UnreliableObjectStream[T_Item],
):
    """
    A bidirectional message stream which guarantees the order and reliability of message
    delivery.
    """

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """
|
||||
|
||||
|
||||
class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
    """
    An interface for receiving bytes from a single peer.

    Iterating this byte stream will yield a byte string of arbitrary length, but no more
    than 65536 bytes.
    """

    def __aiter__(self) -> ByteReceiveStream:
        return self

    async def __anext__(self) -> bytes:
        try:
            return await self.receive()
        except EndOfStream:
            # Translate end-of-stream into the async-iterator protocol's terminator
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self, max_bytes: int = 65536) -> bytes:
        """
        Receive at most ``max_bytes`` bytes from the peer.

        .. note:: Implementers of this interface should not return an empty
            :class:`bytes` object, and users should ignore them.

        :param max_bytes: maximum number of bytes to receive
        :return: the received bytes
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        """
|
||||
|
||||
|
||||
class ByteSendStream(AsyncResource, TypedAttributeProvider):
    """An interface for sending bytes to a single peer."""

    @abstractmethod
    async def send(self, item: bytes) -> None:
        """
        Send the given bytes to the peer.

        :param item: the bytes to send
        """
|
||||
|
||||
|
||||
class ByteStream(ByteReceiveStream, ByteSendStream):
    """A bidirectional byte stream."""

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """
|
||||
|
||||
|
||||
#: Type alias for all unreliable bytes-oriented receive streams.
|
||||
AnyUnreliableByteReceiveStream = Union[
|
||||
UnreliableObjectReceiveStream[bytes], ByteReceiveStream
|
||||
]
|
||||
#: Type alias for all unreliable bytes-oriented send streams.
|
||||
AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream]
|
||||
#: Type alias for all unreliable bytes-oriented streams.
|
||||
AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream]
|
||||
#: Type alias for all bytes-oriented receive streams.
|
||||
AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
|
||||
#: Type alias for all bytes-oriented send streams.
|
||||
AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream]
|
||||
#: Type alias for all bytes-oriented streams.
|
||||
AnyByteStream = Union[ObjectStream[bytes], ByteStream]
|
||||
|
||||
|
||||
class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
    """An interface for objects that let you accept incoming connections."""

    @abstractmethod
    async def serve(
        self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
    ) -> None:
        """
        Accept incoming connections as they come in and start tasks to handle them.

        :param handler: a callable that will be used to handle each accepted connection
        :param task_group: the task group that will be used to start tasks for handling
            each accepted connection (if omitted, an ad-hoc task group will be created)
        """
|
||||
@@ -0,0 +1,79 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import abstractmethod
|
||||
from signal import Signals
|
||||
|
||||
from ._resources import AsyncResource
|
||||
from ._streams import ByteReceiveStream, ByteSendStream
|
||||
|
||||
|
||||
class Process(AsyncResource):
    """An asynchronous version of :class:`subprocess.Popen`."""

    # All members are abstract; each backend supplies a concrete implementation.

    @abstractmethod
    async def wait(self) -> int:
        """
        Wait until the process exits.

        :return: the exit code of the process
        """

    @abstractmethod
    def terminate(self) -> None:
        """
        Terminates the process, gracefully if possible.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGTERM`` to the process.

        .. seealso:: :meth:`subprocess.Popen.terminate`
        """

    @abstractmethod
    def kill(self) -> None:
        """
        Kills the process.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGKILL`` to the process.

        .. seealso:: :meth:`subprocess.Popen.kill`
        """

    @abstractmethod
    def send_signal(self, signal: Signals) -> None:
        """
        Send a signal to the subprocess.

        .. seealso:: :meth:`subprocess.Popen.send_signal`

        :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
        """

    @property
    @abstractmethod
    def pid(self) -> int:
        """The process ID of the process."""

    @property
    @abstractmethod
    def returncode(self) -> int | None:
        """
        The return code of the process. If the process has not yet terminated, this will
        be ``None``.
        """

    @property
    @abstractmethod
    def stdin(self) -> ByteSendStream | None:
        """The stream for the standard input of the process."""

    @property
    @abstractmethod
    def stdout(self) -> ByteReceiveStream | None:
        """The stream for the standard output of the process."""

    @property
    @abstractmethod
    def stderr(self) -> ByteReceiveStream | None:
        """The stream for the standard error output of the process."""
|
||||
@@ -0,0 +1,101 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from collections.abc import Awaitable, Callable
|
||||
from types import TracebackType
|
||||
from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .._core._tasks import CancelScope
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
T_contra = TypeVar("T_contra", contravariant=True)
|
||||
PosArgsT = TypeVarTuple("PosArgsT")
|
||||
|
||||
|
||||
class TaskStatus(Protocol[T_contra]):
    """Protocol used to report task startup to :meth:`TaskGroup.start`."""

    # The two @overload declarations are typing-only: a TaskStatus[None] may be
    # called without a value. Only the final definition carries behavior.
    @overload
    def started(self: TaskStatus[None]) -> None: ...

    @overload
    def started(self, value: T_contra) -> None: ...

    def started(self, value: T_contra | None = None) -> None:
        """
        Signal that the task has started.

        :param value: object passed back to the starter of the task
        """
|
||||
|
||||
|
||||
class TaskGroup(metaclass=ABCMeta):
    """
    Groups several asynchronous tasks together.

    :ivar cancel_scope: the cancel scope inherited by all child tasks
    :vartype cancel_scope: CancelScope

    .. note:: On asyncio, support for eager task factories is considered to be
        **experimental**. In particular, they don't follow the usual semantics of new
        tasks being scheduled on the next iteration of the event loop, and may thus
        cause unexpected behavior in code that wasn't written with such semantics in
        mind.
    """

    # Set by concrete implementations; shared by every task in the group
    cancel_scope: CancelScope

    @abstractmethod
    def start_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> None:
        """
        Start a new task in this task group.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def start(
        self,
        func: Callable[..., Awaitable[Any]],
        *args: object,
        name: object = None,
    ) -> Any:
        """
        Start a new task and wait until it signals for readiness.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging
        :return: the value passed to ``task_status.started()``
        :raises RuntimeError: if the task finishes without calling
            ``task_status.started()``

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def __aenter__(self) -> TaskGroup:
        """Enter the task group context and allow starting new tasks."""

    @abstractmethod
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        """Exit the task group context waiting for all tasks to finish."""
|
||||
@@ -0,0 +1,65 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import types
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable
|
||||
from typing import Any, TypeVar
|
||||
|
||||
_T = TypeVar("_T")
|
||||
|
||||
|
||||
class TestRunner(metaclass=ABCMeta):
    """
    Encapsulates a running event loop. Every call made through this object will use the
    same event loop.
    """

    def __enter__(self) -> TestRunner:
        # Entering the context just exposes the runner; teardown happens in __exit__
        return self

    @abstractmethod
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> bool | None: ...

    @abstractmethod
    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[_T, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[_T]:
        """
        Run an async generator fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: an iterator yielding the value yielded from the async generator
        """

    @abstractmethod
    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, _T]],
        kwargs: dict[str, Any],
    ) -> _T:
        """
        Run an async fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: the return value of the fixture function
        """

    @abstractmethod
    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        """
        Run an async test function.

        :param test_func: the test function
        :param kwargs: keyword arguments to call the test function with
        """
|
||||
@@ -0,0 +1,527 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from collections.abc import Awaitable, Callable, Generator
|
||||
from concurrent.futures import Future
|
||||
from contextlib import (
|
||||
AbstractAsyncContextManager,
|
||||
AbstractContextManager,
|
||||
contextmanager,
|
||||
)
|
||||
from dataclasses import dataclass, field
|
||||
from inspect import isawaitable
|
||||
from threading import Lock, Thread, get_ident
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
Any,
|
||||
Generic,
|
||||
TypeVar,
|
||||
cast,
|
||||
overload,
|
||||
)
|
||||
|
||||
from ._core import _eventloop
|
||||
from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
|
||||
from ._core._synchronization import Event
|
||||
from ._core._tasks import CancelScope, create_task_group
|
||||
from .abc import AsyncBackend
|
||||
from .abc._tasks import TaskStatus
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
T_co = TypeVar("T_co", covariant=True)
|
||||
PosArgsT = TypeVarTuple("PosArgsT")
|
||||
|
||||
|
||||
def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a coroutine function from a worker thread and return its result.

    :param func: a coroutine function
    :param args: positional arguments to call *func* with
    :return: the return value of the coroutine function

    """
    # These thread-local attributes exist only on AnyIO-managed worker threads;
    # their absence means we were called from a foreign thread.
    try:
        backend = threadlocals.current_async_backend
        backend_token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return backend.run_async_from_thread(func, args, token=backend_token)
|
||||
|
||||
|
||||
def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a function in the event loop thread from a worker thread and return its
    result.

    :param func: a callable
    :param args: positional arguments to call *func* with
    :return: the return value of the callable

    """
    # These thread-local attributes exist only on AnyIO-managed worker threads;
    # their absence means we were called from a foreign thread.
    try:
        backend = threadlocals.current_async_backend
        backend_token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return backend.run_sync_from_thread(func, args, token=backend_token)
|
||||
|
||||
|
||||
class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
    """
    Adapts an async context manager into a *synchronous* one usable from a foreign
    thread, by driving it through a :class:`BlockingPortal`.
    """

    _enter_future: Future[T_co]
    _exit_future: Future[bool | None]
    _exit_event: Event
    # Exception info captured in __exit__ and replayed into the async CM's
    # __aexit__; defaults to "no exception"
    _exit_exc_info: tuple[
        type[BaseException] | None, BaseException | None, TracebackType | None
    ] = (None, None, None)

    def __init__(
        self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal
    ):
        self._async_cm = async_cm
        self._portal = portal

    async def run_async_cm(self) -> bool | None:
        # Runs in the portal's event loop for the entire lifetime of the sync
        # context: enter the async CM, park until the sync side exits, then exit it.
        try:
            self._exit_event = Event()
            value = await self._async_cm.__aenter__()
        except BaseException as exc:
            # Propagate the entry failure to the thread blocked in __enter__
            self._enter_future.set_exception(exc)
            raise
        else:
            self._enter_future.set_result(value)

        try:
            # Wait for the sync context manager to exit.
            # This next statement can raise `get_cancelled_exc_class()` if
            # something went wrong in a task group in this async context
            # manager.
            await self._exit_event.wait()
        finally:
            # In case of cancellation, it could be that we end up here before
            # `_BlockingAsyncContextManager.__exit__` is called, and an
            # `_exit_exc_info` has been set.
            result = await self._async_cm.__aexit__(*self._exit_exc_info)
            return result

    def __enter__(self) -> T_co:
        # Schedule run_async_cm() on the portal and block until __aenter__ finishes
        self._enter_future = Future()
        self._exit_future = self._portal.start_task_soon(self.run_async_cm)
        return self._enter_future.result()

    def __exit__(
        self,
        __exc_type: type[BaseException] | None,
        __exc_value: BaseException | None,
        __traceback: TracebackType | None,
    ) -> bool | None:
        # Hand the exception info to the async side, wake it, then block until
        # __aexit__ completes; its return value governs exception suppression.
        self._exit_exc_info = __exc_type, __exc_value, __traceback
        self._portal.call(self._exit_event.set)
        return self._exit_future.result()
|
||||
|
||||
|
||||
class _BlockingPortalTaskStatus(TaskStatus):
    """
    TaskStatus implementation that reports readiness to a worker thread by
    resolving a :class:`concurrent.futures.Future` (see
    :meth:`BlockingPortal.start_task`).
    """

    def __init__(self, future: Future):
        self._future = future

    def started(self, value: object = None) -> None:
        # Unblocks the worker thread waiting on task_status_future.result()
        self._future.set_result(value)
|
||||
|
||||
|
||||
class BlockingPortal:
    """An object that lets external threads run code in an asynchronous event loop."""

    def __new__(cls) -> BlockingPortal:
        # Delegates construction to the current backend so that a
        # backend-specific subclass is returned
        return get_async_backend().create_blocking_portal()

    def __init__(self) -> None:
        # Thread ID of the event loop thread; set to None when the portal stops
        self._event_loop_thread_id: int | None = get_ident()
        self._stop_event = Event()
        self._task_group = create_task_group()
        self._cancelled_exc_class = get_cancelled_exc_class()

    async def __aenter__(self) -> BlockingPortal:
        await self._task_group.__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        # Stop accepting new calls, then wait for remaining tasks via the
        # task group's own exit logic
        await self.stop()
        return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)

    def _check_running(self) -> None:
        # Guard for methods that must be called from a thread OTHER than the
        # event loop thread (they would deadlock otherwise)
        if self._event_loop_thread_id is None:
            raise RuntimeError("This portal is not running")
        if self._event_loop_thread_id == get_ident():
            raise RuntimeError(
                "This method cannot be called from the event loop thread"
            )

    async def sleep_until_stopped(self) -> None:
        """Sleep until :meth:`stop` is called."""
        await self._stop_event.wait()

    async def stop(self, cancel_remaining: bool = False) -> None:
        """
        Signal the portal to shut down.

        This marks the portal as no longer accepting new calls and exits from
        :meth:`sleep_until_stopped`.

        :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
            to let them finish before returning

        """
        self._event_loop_thread_id = None
        self._stop_event.set()
        if cancel_remaining:
            self._task_group.cancel_scope.cancel()

    async def _call_func(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        future: Future[T_Retval],
    ) -> None:
        # Runs inside the event loop: calls func and mirrors its outcome
        # (result / exception / cancellation) into the given future.
        def callback(f: Future[T_Retval]) -> None:
            # Invoked when the worker thread cancels the future; relay the
            # cancellation into the event loop unless the portal is stopped
            # or we're already on the event loop thread
            if f.cancelled() and self._event_loop_thread_id not in (
                None,
                get_ident(),
            ):
                self.call(scope.cancel)

        try:
            retval_or_awaitable = func(*args, **kwargs)
            if isawaitable(retval_or_awaitable):
                # Run the coroutine in its own cancel scope so the future
                # can cancel just this task
                with CancelScope() as scope:
                    if future.cancelled():
                        scope.cancel()
                    else:
                        future.add_done_callback(callback)

                    retval = await retval_or_awaitable
            else:
                retval = retval_or_awaitable
        except self._cancelled_exc_class:
            # Mark the future cancelled; set_running_or_notify_cancel()
            # finalizes the transition to CANCELLED_AND_NOTIFIED
            future.cancel()
            future.set_running_or_notify_cancel()
        except BaseException as exc:
            if not future.cancelled():
                future.set_exception(exc)

            # Let base exceptions fall through
            if not isinstance(exc, Exception):
                raise
        else:
            if not future.cancelled():
                future.set_result(retval)
        finally:
            # Break the reference cycle through the callback closure
            scope = None  # type: ignore[assignment]

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        """
        Spawn a new task using the given callable.

        Implementers must ensure that the future is resolved when the task finishes.

        :param func: a callable
        :param args: positional arguments to be passed to the callable
        :param kwargs: keyword arguments to be passed to the callable
        :param name: name of the task (will be coerced to a string if not ``None``)
        :param future: a future that will resolve to the return value of the callable,
            or the exception raised during its execution

        """
        raise NotImplementedError

    @overload
    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
    ) -> T_Retval: ...

    @overload
    def call(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval: ...

    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
    ) -> T_Retval:
        """
        Call the given function in the event loop thread.

        If the callable returns a coroutine object, it is awaited on.

        :param func: any callable
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread

        """
        # Blocks the calling thread until the task completes in the event loop
        return cast(T_Retval, self.start_task_soon(func, *args).result())

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]:
        """
        Start a task in the portal's task group.

        The task will be run inside a cancel scope which can be cancelled by cancelling
        the returned future.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a future that resolves with the return value of the callable if the
            task completes successfully, or with the exception raised in the task
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread
        :rtype: concurrent.futures.Future[T_Retval]

        .. versionadded:: 3.0

        """
        self._check_running()
        f: Future[T_Retval] = Future()
        self._spawn_task_from_thread(func, args, {}, name, f)
        return f

    def start_task(
        self,
        func: Callable[..., Awaitable[T_Retval]],
        *args: object,
        name: object = None,
    ) -> tuple[Future[T_Retval], Any]:
        """
        Start a task in the portal's task group and wait until it signals for readiness.

        This method works the same way as :meth:`.abc.TaskGroup.start`.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a tuple of (future, task_status_value) where the ``task_status_value``
            is the value passed to ``task_status.started()`` from within the target
            function
        :rtype: tuple[concurrent.futures.Future[T_Retval], Any]

        .. versionadded:: 3.0

        """

        def task_done(future: Future[T_Retval]) -> None:
            # If the task finished without ever calling task_status.started(),
            # propagate its failure (or a RuntimeError) to the waiting thread
            if not task_status_future.done():
                if future.cancelled():
                    task_status_future.cancel()
                elif future.exception():
                    task_status_future.set_exception(future.exception())
                else:
                    exc = RuntimeError(
                        "Task exited without calling task_status.started()"
                    )
                    task_status_future.set_exception(exc)

        self._check_running()
        task_status_future: Future = Future()
        task_status = _BlockingPortalTaskStatus(task_status_future)
        f: Future = Future()
        f.add_done_callback(task_done)
        self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
        # Blocks until started() is called (or the task dies first)
        return f, task_status_future.result()

    def wrap_async_context_manager(
        self, cm: AbstractAsyncContextManager[T_co]
    ) -> AbstractContextManager[T_co]:
        """
        Wrap an async context manager as a synchronous context manager via this portal.

        Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
        in the middle until the synchronous context manager exits.

        :param cm: an asynchronous context manager
        :return: a synchronous context manager

        .. versionadded:: 2.1

        """
        return _BlockingAsyncContextManager(cm, self)
|
||||
|
||||
|
||||
@dataclass
class BlockingPortalProvider:
    """
    A manager for a blocking portal. Used as a context manager. The first thread to
    enter this context manager causes a blocking portal to be started with the specific
    parameters, and the last thread to exit causes the portal to be shut down. Thus,
    there will be exactly one blocking portal running in this context as long as at
    least one thread has entered this context manager.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options

    .. versionadded:: 4.4
    """

    backend: str = "asyncio"
    backend_options: dict[str, Any] | None = None
    # Protects the lease count and portal lifecycle across threads
    _lock: Lock = field(init=False, default_factory=Lock)
    # Number of threads currently inside the context manager
    _leases: int = field(init=False, default=0)
    _portal: BlockingPortal = field(init=False)
    _portal_cm: AbstractContextManager[BlockingPortal] | None = field(
        init=False, default=None
    )

    def __enter__(self) -> BlockingPortal:
        with self._lock:
            # First thread in: start the portal (and its event loop thread)
            if self._portal_cm is None:
                self._portal_cm = start_blocking_portal(
                    self.backend, self.backend_options
                )
                self._portal = self._portal_cm.__enter__()

            self._leases += 1
            return self._portal

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        portal_cm: AbstractContextManager[BlockingPortal] | None = None
        with self._lock:
            assert self._portal_cm
            assert self._leases > 0
            self._leases -= 1
            # Last thread out: detach the portal state while holding the lock...
            if not self._leases:
                portal_cm = self._portal_cm
                self._portal_cm = None
                del self._portal

        # ...but shut the portal down outside the lock, since stopping can
        # block while remaining tasks finish
        if portal_cm:
            portal_cm.__exit__(None, None, None)
|
||||
|
||||
|
||||
@contextmanager
def start_blocking_portal(
    backend: str = "asyncio", backend_options: dict[str, Any] | None = None
) -> Generator[BlockingPortal, Any, None]:
    """
    Start a new event loop in a new thread and run a blocking portal in its main task.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options
    :return: a context manager that yields a blocking portal

    .. versionchanged:: 3.0
        Usage as a context manager is now required.

    """

    async def run_portal() -> None:
        # Main task of the new event loop: publish the portal to the waiting
        # caller, then idle until the portal is stopped
        async with BlockingPortal() as portal_:
            future.set_result(portal_)
            await portal_.sleep_until_stopped()

    def run_blocking_portal() -> None:
        # Thread target: only run if the caller hasn't already cancelled the
        # future while waiting for startup
        if future.set_running_or_notify_cancel():
            try:
                _eventloop.run(
                    run_portal, backend=backend, backend_options=backend_options
                )
            except BaseException as exc:
                # Startup failures are delivered through the future; errors
                # after the portal was handed over are swallowed here
                if not future.done():
                    future.set_exception(exc)

    future: Future[BlockingPortal] = Future()
    thread = Thread(target=run_blocking_portal, daemon=True)
    thread.start()
    try:
        cancel_remaining_tasks = False
        portal = future.result()  # blocks until run_portal() has started
        try:
            yield portal
        except BaseException:
            # An error in the caller's block cancels any leftover portal tasks
            cancel_remaining_tasks = True
            raise
        finally:
            try:
                portal.call(portal.stop, cancel_remaining_tasks)
            except RuntimeError:
                # The portal already stopped on its own
                pass
    finally:
        thread.join()
|
||||
|
||||
|
||||
def check_cancelled() -> None:
    """
    Check if the cancel scope of the host task's running the current worker thread has
    been cancelled.

    If the host task's current cancel scope has indeed been cancelled, the
    backend-specific cancellation exception will be raised.

    :raises RuntimeError: if the current thread was not spawned by
        :func:`.to_thread.run_sync`

    """
    # The worker-thread locals only carry a backend when this thread was
    # spawned via to_thread.run_sync()
    backend: AsyncBackend
    try:
        backend = threadlocals.current_async_backend
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    backend.check_cancelled()
|
||||
@@ -0,0 +1,161 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import enum
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Generic, Literal, TypeVar, overload
|
||||
from weakref import WeakKeyDictionary
|
||||
|
||||
from ._core._eventloop import get_async_backend
|
||||
|
||||
T = TypeVar("T")  # the value type stored in a RunVar
D = TypeVar("D")  # the type of a caller-supplied default in RunVar.get()
|
||||
|
||||
|
||||
async def checkpoint() -> None:
    """
    Check for cancellation and allow the scheduler to switch to another task.

    Equivalent to (but more efficient than)::

        await checkpoint_if_cancelled()
        await cancel_shielded_checkpoint()


    .. versionadded:: 3.0

    """
    # Delegate directly to the running backend's native checkpoint
    backend = get_async_backend()
    await backend.checkpoint()
|
||||
|
||||
|
||||
async def checkpoint_if_cancelled() -> None:
    """
    Enter a checkpoint if the enclosing cancel scope has been cancelled.

    This does not allow the scheduler to switch to a different task.

    .. versionadded:: 3.0

    """
    # The backend decides whether a cancellation is pending and raises if so
    backend = get_async_backend()
    await backend.checkpoint_if_cancelled()
|
||||
|
||||
|
||||
async def cancel_shielded_checkpoint() -> None:
    """
    Allow the scheduler to switch to another task but without checking for cancellation.

    Equivalent to (but potentially more efficient than)::

        with CancelScope(shield=True):
            await checkpoint()


    .. versionadded:: 3.0

    """
    # Yield to the scheduler via the backend, shielded from cancellation
    backend = get_async_backend()
    await backend.cancel_shielded_checkpoint()
|
||||
|
||||
|
||||
def current_token() -> object:
    """
    Return a backend specific token object that can be used to get back to the event
    loop.

    """
    # The token identifies the running event loop; it is also used as the
    # weak key for per-loop RunVar storage
    backend = get_async_backend()
    return backend.current_token()
|
||||
|
||||
|
||||
# Maps an event loop token to that loop's run variable values. Weak keys let
# the storage be dropped when the event loop object itself is collected.
_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
# NOTE(review): appears unused in this module's visible code — presumably kept
# for tokens that cannot be weakly referenced directly; confirm before removing
_token_wrappers: dict[Any, _TokenWrapper] = {}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class _TokenWrapper:
    """Hashable, weak-referenceable wrapper around an event loop token."""

    # __weakref__ slot is required so instances can serve as WeakKeyDictionary keys
    __slots__ = "_token", "__weakref__"
    _token: object
|
||||
|
||||
|
||||
class _NoValueSet(enum.Enum):
    """Sentinel enum; used instead of ``None`` so ``None`` stays a valid RunVar value."""

    NO_VALUE_SET = enum.auto()
|
||||
|
||||
|
||||
class RunvarToken(Generic[T]):
    """
    Token returned by :meth:`RunVar.set`; passed to :meth:`RunVar.reset` to
    restore the variable's previous value.
    """

    __slots__ = "_var", "_value", "_redeemed"

    def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
        self._var = var  # the RunVar this token belongs to
        # The value in effect before set(); NO_VALUE_SET means "was unset"
        self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
        self._redeemed = False  # a token may only be used once
|
||||
|
||||
|
||||
class RunVar(Generic[T]):
    """
    Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
    """

    __slots__ = "_name", "_default"

    NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET

    _token_wrappers: set[_TokenWrapper] = set()

    def __init__(
        self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ):
        self._name = name
        self._default = default

    @property
    def _current_vars(self) -> dict[str, T]:
        # The per-event-loop variable dict, created on first access for the
        # current loop (keyed by the backend's loop token)
        token = current_token()
        try:
            return _run_vars[token]
        except KeyError:
            run_vars = _run_vars[token] = {}
            return run_vars

    @overload
    def get(self, default: D) -> T | D: ...

    @overload
    def get(self) -> T: ...

    def get(
        self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ) -> T | D:
        # Lookup order: current loop's value, call-site default, constructor
        # default; LookupError if none of these exist
        try:
            return self._current_vars[self._name]
        except KeyError:
            if default is not RunVar.NO_VALUE_SET:
                return default
            elif self._default is not RunVar.NO_VALUE_SET:
                return self._default

        raise LookupError(
            f'Run variable "{self._name}" has no value and no default set'
        )

    def set(self, value: T) -> RunvarToken[T]:
        # Capture the previous value (or the "unset" sentinel) in a token so
        # reset() can restore it
        current_vars = self._current_vars
        token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
        current_vars[self._name] = value
        return token

    def reset(self, token: RunvarToken[T]) -> None:
        if token._var is not self:
            raise ValueError("This token does not belong to this RunVar")

        if token._redeemed:
            raise ValueError("This token has already been used")

        if token._value is _NoValueSet.NO_VALUE_SET:
            # The variable was unset before the matching set(); remove it again
            try:
                del self._current_vars[self._name]
            except KeyError:
                pass
        else:
            self._current_vars[self._name] = token._value

        token._redeemed = True

    def __repr__(self) -> str:
        return f"<RunVar name={self._name!r}>"
|
||||
@@ -0,0 +1,272 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import socket
|
||||
import sys
|
||||
from collections.abc import Callable, Generator, Iterator
|
||||
from contextlib import ExitStack, contextmanager
|
||||
from inspect import isasyncgenfunction, iscoroutinefunction, ismethod
|
||||
from typing import Any, cast
|
||||
|
||||
import pytest
|
||||
import sniffio
|
||||
from _pytest.fixtures import SubRequest
|
||||
from _pytest.outcomes import Exit
|
||||
|
||||
from ._core._eventloop import get_all_backends, get_async_backend
|
||||
from ._core._exceptions import iterate_exceptions
|
||||
from .abc import TestRunner
|
||||
|
||||
if sys.version_info < (3, 11):
|
||||
from exceptiongroup import ExceptionGroup
|
||||
|
||||
# Shared, reference-counted test runner state: one runner is lazily created
# per backend and torn down when the last lease is released (see get_runner())
_current_runner: TestRunner | None = None
_runner_stack: ExitStack | None = None
_runner_leases = 0
|
||||
|
||||
|
||||
def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
    """
    Normalize an ``anyio_backend`` value into a ``(name, options)`` pair.

    :param backend: either a backend name, or a 2-tuple of (name, options dict)
    :return: the backend name and its options dict (empty for a bare name)
    :raises TypeError: if ``backend`` has any other shape

    """
    if isinstance(backend, str):
        return backend, {}

    if isinstance(backend, tuple) and len(backend) == 2:
        name, options = backend
        if isinstance(name, str) and isinstance(options, dict):
            # Already the right shape; return it as-is
            return cast(tuple[str, dict[str, Any]], backend)

    raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")
|
||||
|
||||
|
||||
@contextmanager
def get_runner(
    backend_name: str, backend_options: dict[str, Any]
) -> Iterator[TestRunner]:
    # Yields the shared TestRunner for the given backend, creating it on first
    # use and closing it when the last concurrent lease is released.
    global _current_runner, _runner_leases, _runner_stack
    if _current_runner is None:
        asynclib = get_async_backend(backend_name)
        _runner_stack = ExitStack()
        if sniffio.current_async_library_cvar.get(None) is None:
            # Since we're in control of the event loop, we can cache the name of the
            # async library
            token = sniffio.current_async_library_cvar.set(backend_name)
            _runner_stack.callback(sniffio.current_async_library_cvar.reset, token)

        backend_options = backend_options or {}
        _current_runner = _runner_stack.enter_context(
            asynclib.create_test_runner(backend_options)
        )

    _runner_leases += 1
    try:
        yield _current_runner
    finally:
        _runner_leases -= 1
        if not _runner_leases:
            # Last lease out: tear down the runner and the sniffio override
            assert _runner_stack is not None
            _runner_stack.close()
            _runner_stack = _current_runner = None
|
||||
|
||||
|
||||
def pytest_configure(config: Any) -> None:
    """Register the ``anyio`` marker so pytest does not warn about it being unknown."""
    config.addinivalue_line(
        "markers",
        "anyio: mark the (coroutine function) test to be run asynchronously via anyio.",
    )
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef: Any, request: Any) -> Generator[Any]:
    # Hook wrapper that temporarily swaps an async fixture function for a sync
    # wrapper which runs it on the shared anyio test runner, then restores the
    # original fixturedef afterwards.
    def wrapper(
        *args: Any, anyio_backend: Any, request: SubRequest, **kwargs: Any
    ) -> Any:
        # Rebind any fixture methods to the request instance
        if (
            request.instance
            and ismethod(func)
            and type(func.__self__) is type(request.instance)
        ):
            local_func = func.__func__.__get__(request.instance)
        else:
            local_func = func

        backend_name, backend_options = extract_backend_and_options(anyio_backend)
        # Only forward the injected arguments if the fixture declared them itself
        if has_backend_arg:
            kwargs["anyio_backend"] = anyio_backend

        if has_request_arg:
            kwargs["request"] = request

        with get_runner(backend_name, backend_options) as runner:
            if isasyncgenfunction(local_func):
                # Async generator fixture: drive setup/teardown through the runner
                yield from runner.run_asyncgen_fixture(local_func, kwargs)
            else:
                yield runner.run_fixture(local_func, kwargs)

    # Only apply this to coroutine functions and async generator functions in requests
    # that involve the anyio_backend fixture
    func = fixturedef.func
    if isasyncgenfunction(func) or iscoroutinefunction(func):
        if "anyio_backend" in request.fixturenames:
            fixturedef.func = wrapper
            original_argname = fixturedef.argnames

            # Ensure the wrapper receives anyio_backend and request even when
            # the fixture itself did not ask for them
            if not (has_backend_arg := "anyio_backend" in fixturedef.argnames):
                fixturedef.argnames += ("anyio_backend",)

            if not (has_request_arg := "request" in fixturedef.argnames):
                fixturedef.argnames += ("request",)

            try:
                return (yield)
            finally:
                # Restore the original function/argnames so the mutation does
                # not leak into later setups
                fixturedef.func = func
                fixturedef.argnames = original_argname

    return (yield)
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
    """
    Ensure coroutine tests marked with ``anyio`` also request the
    ``anyio_backend`` fixture, so they get parametrized over the backends.
    """
    if not collector.istestfunction(obj, name):
        return

    # With hypothesis, the actual coroutine is the wrapped inner test
    inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
    if not iscoroutinefunction(inner_func):
        return

    own_markers = getattr(obj, "pytestmark", ())
    if collector.get_closest_marker("anyio") or any(
        mark.name == "anyio" for mark in own_markers
    ):
        pytest.mark.usefixtures("anyio_backend")(obj)
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
    # Runs coroutine test functions on the shared anyio test runner. Returns
    # True to tell pytest the call was handled, None to fall through to the
    # default call mechanism.
    def run_with_hypothesis(**kwargs: Any) -> None:
        with get_runner(backend_name, backend_options) as runner:
            runner.run_test(original_func, kwargs)

    backend = pyfuncitem.funcargs.get("anyio_backend")
    if backend:
        backend_name, backend_options = extract_backend_and_options(backend)

        if hasattr(pyfuncitem.obj, "hypothesis"):
            # Wrap the inner test function unless it's already wrapped
            original_func = pyfuncitem.obj.hypothesis.inner_test
            if original_func.__qualname__ != run_with_hypothesis.__qualname__:
                if iscoroutinefunction(original_func):
                    pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis

            # Let hypothesis drive the (now wrapped) test itself
            return None

        if iscoroutinefunction(pyfuncitem.obj):
            funcargs = pyfuncitem.funcargs
            testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
            with get_runner(backend_name, backend_options) as runner:
                try:
                    runner.run_test(pyfuncitem.obj, testargs)
                except ExceptionGroup as excgrp:
                    # Surface session-terminating exceptions directly so pytest
                    # reacts to them (e.g. Ctrl-C, --exitfirst internals)
                    for exc in iterate_exceptions(excgrp):
                        if isinstance(exc, (Exit, KeyboardInterrupt, SystemExit)):
                            raise exc from excgrp

                    raise

            return True

    return None
|
||||
|
||||
|
||||
@pytest.fixture(scope="module", params=get_all_backends())
def anyio_backend(request: Any) -> Any:
    """Parametrized over all backends; yields a backend name or (name, options) tuple."""
    return request.param
|
||||
|
||||
|
||||
@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
    """Return just the backend name from the ``anyio_backend`` fixture value."""
    return anyio_backend if isinstance(anyio_backend, str) else anyio_backend[0]
|
||||
|
||||
|
||||
@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
    """Return just the backend options from the ``anyio_backend`` fixture value."""
    return {} if isinstance(anyio_backend, str) else anyio_backend[1]
|
||||
|
||||
|
||||
class FreePortFactory:
|
||||
"""
|
||||
Manages port generation based on specified socket kind, ensuring no duplicate
|
||||
ports are generated.
|
||||
|
||||
This class provides functionality for generating available free ports on the
|
||||
system. It is initialized with a specific socket kind and can generate ports
|
||||
for given address families while avoiding reuse of previously generated ports.
|
||||
|
||||
Users should not instantiate this class directly, but use the
|
||||
``free_tcp_port_factory`` and ``free_udp_port_factory`` fixtures instead. For simple
|
||||
uses cases, ``free_tcp_port`` and ``free_udp_port`` can be used instead.
|
||||
"""
|
||||
|
||||
def __init__(self, kind: socket.SocketKind) -> None:
|
||||
self._kind = kind
|
||||
self._generated = set[int]()
|
||||
|
||||
@property
|
||||
def kind(self) -> socket.SocketKind:
|
||||
"""
|
||||
The type of socket connection (e.g., :data:`~socket.SOCK_STREAM` or
|
||||
:data:`~socket.SOCK_DGRAM`) used to bind for checking port availability
|
||||
|
||||
"""
|
||||
return self._kind
|
||||
|
||||
def __call__(self, family: socket.AddressFamily | None = None) -> int:
|
||||
"""
|
||||
Return an unbound port for the given address family.
|
||||
|
||||
:param family: if omitted, both IPv4 and IPv6 addresses will be tried
|
||||
:return: a port number
|
||||
|
||||
"""
|
||||
if family is not None:
|
||||
families = [family]
|
||||
else:
|
||||
families = [socket.AF_INET]
|
||||
if socket.has_ipv6:
|
||||
families.append(socket.AF_INET6)
|
||||
|
||||
while True:
|
||||
port = 0
|
||||
with ExitStack() as stack:
|
||||
for family in families:
|
||||
sock = stack.enter_context(socket.socket(family, self._kind))
|
||||
addr = "::1" if family == socket.AF_INET6 else "127.0.0.1"
|
||||
try:
|
||||
sock.bind((addr, port))
|
||||
except OSError:
|
||||
break
|
||||
|
||||
if not port:
|
||||
port = sock.getsockname()[1]
|
||||
else:
|
||||
if port not in self._generated:
|
||||
self._generated.add(port)
|
||||
return port
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def free_tcp_port_factory() -> FreePortFactory:
    """Session-scoped factory yielding unique free TCP ports."""
    return FreePortFactory(socket.SOCK_STREAM)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def free_udp_port_factory() -> FreePortFactory:
    """Session-scoped factory yielding unique free UDP ports."""
    return FreePortFactory(socket.SOCK_DGRAM)
|
||||
|
||||
|
||||
@pytest.fixture
def free_tcp_port(free_tcp_port_factory: Callable[[], int]) -> int:
    """Return a single free TCP port from the session-scoped factory."""
    return free_tcp_port_factory()
|
||||
|
||||
|
||||
@pytest.fixture
def free_udp_port(free_udp_port_factory: Callable[[], int]) -> int:
    """Return a single free UDP port from the session-scoped factory."""
    return free_udp_port_factory()
|
||||
@@ -0,0 +1,119 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Callable, Mapping
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
|
||||
from ..abc import AnyByteReceiveStream, ByteReceiveStream
|
||||
|
||||
|
||||
@dataclass(eq=False)
class BufferedByteReceiveStream(ByteReceiveStream):
    """
    Wraps any bytes-based receive stream and uses a buffer to provide sophisticated
    receiving capabilities in the form of a byte stream.
    """

    receive_stream: AnyByteReceiveStream
    _buffer: bytearray = field(init=False, default_factory=bytearray)
    _closed: bool = field(init=False, default=False)

    async def aclose(self) -> None:
        await self.receive_stream.aclose()
        self._closed = True

    @property
    def buffer(self) -> bytes:
        """The bytes currently in the buffer."""
        return bytes(self._buffer)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.receive_stream.extra_attributes

    async def receive(self, max_bytes: int = 65536) -> bytes:
        # Serve buffered bytes first; only hit the wrapped stream when the
        # buffer is empty
        if self._closed:
            raise ClosedResourceError

        if self._buffer:
            chunk = bytes(self._buffer[:max_bytes])
            del self._buffer[:max_bytes]
            return chunk
        elif isinstance(self.receive_stream, ByteReceiveStream):
            return await self.receive_stream.receive(max_bytes)
        else:
            # With a bytes-oriented object stream, we need to handle any surplus bytes
            # we get from the receive() call
            chunk = await self.receive_stream.receive()
            if len(chunk) > max_bytes:
                # Save the surplus bytes in the buffer
                self._buffer.extend(chunk[max_bytes:])
                return chunk[:max_bytes]
            else:
                return chunk

    async def receive_exactly(self, nbytes: int) -> bytes:
        """
        Read exactly the given amount of bytes from the stream.

        :param nbytes: the number of bytes to read
        :return: the bytes read
        :raises ~anyio.IncompleteRead: if the stream was closed before the requested
            amount of bytes could be read from the stream

        """
        while True:
            remaining = nbytes - len(self._buffer)
            if remaining <= 0:
                # Enough buffered: return the prefix, keep any surplus
                retval = self._buffer[:nbytes]
                del self._buffer[:nbytes]
                return bytes(retval)

            try:
                if isinstance(self.receive_stream, ByteReceiveStream):
                    chunk = await self.receive_stream.receive(remaining)
                else:
                    chunk = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            self._buffer.extend(chunk)

    async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
        """
        Read from the stream until the delimiter is found or max_bytes have been read.

        :param delimiter: the marker to look for in the stream
        :param max_bytes: maximum number of bytes that will be read before raising
            :exc:`~anyio.DelimiterNotFound`
        :return: the bytes read (not including the delimiter)
        :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
            was found
        :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
            bytes read up to the maximum allowed

        """
        delimiter_size = len(delimiter)
        offset = 0
        while True:
            # Check if the delimiter can be found in the current buffer
            index = self._buffer.find(delimiter, offset)
            if index >= 0:
                found = self._buffer[:index]
                del self._buffer[: index + len(delimiter) :]
                return bytes(found)

            # Check if the buffer is already at or over the limit
            if len(self._buffer) >= max_bytes:
                raise DelimiterNotFound(max_bytes)

            # Read more data into the buffer from the socket
            try:
                data = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            # Move the offset forward and add the new data to the buffer.
            # Backing off by delimiter_size - 1 ensures a delimiter straddling
            # the old/new data boundary is still found
            offset = max(len(self._buffer) - delimiter_size + 1, 0)
            self._buffer.extend(data)
|
||||
@@ -0,0 +1,148 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Callable, Mapping
|
||||
from io import SEEK_SET, UnsupportedOperation
|
||||
from os import PathLike
|
||||
from pathlib import Path
|
||||
from typing import Any, BinaryIO, cast
|
||||
|
||||
from .. import (
|
||||
BrokenResourceError,
|
||||
ClosedResourceError,
|
||||
EndOfStream,
|
||||
TypedAttributeSet,
|
||||
to_thread,
|
||||
typed_attribute,
|
||||
)
|
||||
from ..abc import ByteReceiveStream, ByteSendStream
|
||||
|
||||
|
||||
class FileStreamAttribute(TypedAttributeSet):
    """Extra attributes exposed by streams backed by an open file object."""

    #: the open file descriptor
    file: BinaryIO = typed_attribute()
    #: the path of the file on the file system, if available (file must be a real file)
    path: Path = typed_attribute()
    #: the file number, if available (file must be a real file or a TTY)
    fileno: int = typed_attribute()
|
||||
|
||||
|
||||
class _BaseFileStream:
    """Common behavior shared by the file-backed read and write streams."""

    def __init__(self, file: BinaryIO):
        self._file = file

    async def aclose(self) -> None:
        # Closing can block on flushing buffered data, so run it in a thread
        await to_thread.run_sync(self._file.close)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        extras: dict[Any, Callable[[], Any]] = {
            FileStreamAttribute.file: lambda: self._file
        }

        # A file system path is only available for named files
        if hasattr(self._file, "name"):
            extras[FileStreamAttribute.path] = lambda: Path(self._file.name)

        # Probe for a real file descriptor; in-memory file objects raise
        # UnsupportedOperation from fileno()
        try:
            self._file.fileno()
        except UnsupportedOperation:
            pass
        else:
            extras[FileStreamAttribute.fileno] = lambda: self._file.fileno()

        return extras
|
||||
|
||||
|
||||
class FileReadStream(_BaseFileStream, ByteReceiveStream):
    """
    A byte stream that reads from a file in the file system.

    :param file: a file that has been opened for reading in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:
        """
        Create a file read stream by opening the given file.

        :param path: path of the file to read from

        """
        # Opening the file may block, so do it in a worker thread
        file = await to_thread.run_sync(Path(path).open, "rb")
        return cls(cast(BinaryIO, file))

    async def receive(self, max_bytes: int = 65536) -> bytes:
        """
        Read up to ``max_bytes`` bytes from the file.

        :param max_bytes: maximum number of bytes to read
        :return: the bytes read
        :raises ~anyio.ClosedResourceError: if the file has been closed
        :raises ~anyio.BrokenResourceError: if reading from the file fails
        :raises ~anyio.EndOfStream: if the end of the file has been reached

        """
        try:
            data = await to_thread.run_sync(self._file.read, max_bytes)
        except ValueError:
            # file.read() raises ValueError when the file object is closed
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc

        if data:
            return data
        else:
            # An empty read signals end of file
            raise EndOfStream

    async def seek(self, position: int, whence: int = SEEK_SET) -> int:
        """
        Seek the file to the given position.

        .. seealso:: :meth:`io.IOBase.seek`

        .. note:: Not all file descriptors are seekable.

        :param position: position to seek the file to
        :param whence: controls how ``position`` is interpreted
        :return: the new absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.seek, position, whence)

    async def tell(self) -> int:
        """
        Return the current stream position.

        .. note:: Not all file descriptors are seekable.

        :return: the current absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.tell)
|
||||
|
||||
|
||||
class FileWriteStream(_BaseFileStream, ByteSendStream):
    """
    A byte stream that writes to a file in the file system.

    :param file: a file that has been opened for writing in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(
        cls, path: str | PathLike[str], append: bool = False
    ) -> FileWriteStream:
        """
        Create a file write stream by opening the given file for writing.

        :param path: path of the file to write to
        :param append: if ``True``, open the file for appending; if ``False``, any
            existing file at the given path will be truncated

        """
        mode = "ab" if append else "wb"
        # Opening the file may block, so do it in a worker thread
        file = await to_thread.run_sync(Path(path).open, mode)
        return cls(cast(BinaryIO, file))

    async def send(self, item: bytes) -> None:
        """
        Write the given bytes to the file.

        :param item: the bytes to write
        :raises ~anyio.ClosedResourceError: if the file has been closed
        :raises ~anyio.BrokenResourceError: if writing to the file fails

        """
        try:
            await to_thread.run_sync(self._file.write, item)
        except ValueError:
            # file.write() raises ValueError when the file object is closed
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc
|
||||
@@ -0,0 +1,317 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import warnings
|
||||
from collections import OrderedDict, deque
|
||||
from dataclasses import dataclass, field
|
||||
from types import TracebackType
|
||||
from typing import Generic, NamedTuple, TypeVar
|
||||
|
||||
from .. import (
|
||||
BrokenResourceError,
|
||||
ClosedResourceError,
|
||||
EndOfStream,
|
||||
WouldBlock,
|
||||
)
|
||||
from .._core._testing import TaskInfo, get_current_task
|
||||
from ..abc import Event, ObjectReceiveStream, ObjectSendStream
|
||||
from ..lowlevel import checkpoint
|
||||
|
||||
T_Item = TypeVar("T_Item")
|
||||
T_co = TypeVar("T_co", covariant=True)
|
||||
T_contra = TypeVar("T_contra", contravariant=True)
|
||||
|
||||
|
||||
class MemoryObjectStreamStatistics(NamedTuple):
    """A point-in-time snapshot of a memory object stream's internal state."""

    current_buffer_used: int  #: number of items stored in the buffer
    #: maximum number of items that can be stored on this stream (or :data:`math.inf`)
    max_buffer_size: float
    open_send_streams: int  #: number of unclosed clones of the send stream
    open_receive_streams: int  #: number of unclosed clones of the receive stream
    #: number of tasks blocked on :meth:`MemoryObjectSendStream.send`
    tasks_waiting_send: int
    #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive`
    tasks_waiting_receive: int
|
||||
|
||||
|
||||
@dataclass(eq=False)
class MemoryObjectItemReceiver(Generic[T_Item]):
    """Bookkeeping record for a task waiting in receive()."""

    # Captured at creation time; senders check it to skip receivers whose task
    # already has a pending cancellation
    task_info: TaskInfo = field(init=False, default_factory=get_current_task)
    # Deliberately left unset until a sender delivers an item; receive() uses
    # the resulting AttributeError to distinguish "stream closed" from delivery
    item: T_Item = field(init=False)

    def __repr__(self) -> str:
        # When item is not defined, we get following error with default __repr__:
        # AttributeError: 'MemoryObjectItemReceiver' object has no attribute 'item'
        item = getattr(self, "item", None)
        return f"{self.__class__.__name__}(task_info={self.task_info}, item={item!r})"
|
||||
|
||||
|
||||
@dataclass(eq=False)
class MemoryObjectStreamState(Generic[T_Item]):
    """Mutable state shared by all clones of a memory object stream pair."""

    max_buffer_size: float = field()
    # Items sent but not yet received
    buffer: deque[T_Item] = field(init=False, default_factory=deque)
    open_send_channels: int = field(init=False, default=0)
    open_receive_channels: int = field(init=False, default=0)
    # Tasks blocked in receive(), in FIFO order (insertion order of the dict)
    waiting_receivers: OrderedDict[Event, MemoryObjectItemReceiver[T_Item]] = field(
        init=False, default_factory=OrderedDict
    )
    # Tasks blocked in send(), mapped to the item each is trying to deliver
    waiting_senders: OrderedDict[Event, T_Item] = field(
        init=False, default_factory=OrderedDict
    )

    def statistics(self) -> MemoryObjectStreamStatistics:
        """Return a snapshot of the stream's current state."""
        return MemoryObjectStreamStatistics(
            len(self.buffer),
            self.max_buffer_size,
            self.open_send_channels,
            self.open_receive_channels,
            len(self.waiting_senders),
            len(self.waiting_receivers),
        )
|
||||
|
||||
|
||||
@dataclass(eq=False)
class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]):
    """The receiving end of a memory object stream."""

    # State shared with the send stream and all clones
    _state: MemoryObjectStreamState[T_co]
    # True once this particular clone has been closed
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        self._state.open_receive_channels += 1

    def receive_nowait(self) -> T_co:
        """
        Receive the next item if it can be done without waiting.

        :return: the received item
        :raises ~anyio.ClosedResourceError: if this receive stream has been closed
        :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been
            closed from the sending end
        :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks
            waiting to send

        """
        if self._closed:
            raise ClosedResourceError

        if self._state.waiting_senders:
            # Get the item from the next sender
            send_event, item = self._state.waiting_senders.popitem(last=False)
            self._state.buffer.append(item)
            send_event.set()

        if self._state.buffer:
            return self._state.buffer.popleft()
        elif not self._state.open_send_channels:
            raise EndOfStream

        raise WouldBlock

    async def receive(self) -> T_co:
        """Receive the next item, waiting for one if necessary."""
        await checkpoint()
        try:
            return self.receive_nowait()
        except WouldBlock:
            # Add ourselves in the queue
            receive_event = Event()
            receiver = MemoryObjectItemReceiver[T_co]()
            self._state.waiting_receivers[receive_event] = receiver

            try:
                await receive_event.wait()
            finally:
                # Always unregister, even if cancelled while waiting
                self._state.waiting_receivers.pop(receive_event, None)

            try:
                # "item" is only set if a sender delivered one to us
                return receiver.item
            except AttributeError:
                # Woken without an item: the sending end was closed
                raise EndOfStream from None

    def clone(self) -> MemoryObjectReceiveStream[T_co]:
        """
        Create a clone of this receive stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the receiving end of the memory stream be considered closed by the sending ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectReceiveStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_receive_channels -= 1
            if self._state.open_receive_channels == 0:
                # Wake all pending senders; their entries are left in
                # waiting_senders, which send() interprets as a broken
                # (closed) receiving end
                send_events = list(self._state.waiting_senders.keys())
                for event in send_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectReceiveStream[T_co]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        # Warn about streams that were garbage collected without being closed
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
                ResourceWarning,
                source=self,
            )
|
||||
|
||||
|
||||
@dataclass(eq=False)
class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]):
    """The sending end of a memory object stream."""

    # State shared with the receive stream and all clones
    _state: MemoryObjectStreamState[T_contra]
    # True once this particular clone has been closed
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        self._state.open_send_channels += 1

    def send_nowait(self, item: T_contra) -> None:
        """
        Send an item immediately if it can be done without waiting.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end
        :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting
            to receive

        """
        if self._closed:
            raise ClosedResourceError
        if not self._state.open_receive_channels:
            raise BrokenResourceError

        # Hand the item directly to a waiting receiver if possible, skipping
        # receivers whose task already has a pending cancellation
        while self._state.waiting_receivers:
            receive_event, receiver = self._state.waiting_receivers.popitem(last=False)
            if not receiver.task_info.has_pending_cancellation():
                receiver.item = item
                receive_event.set()
                return

        if len(self._state.buffer) < self._state.max_buffer_size:
            self._state.buffer.append(item)
        else:
            raise WouldBlock

    async def send(self, item: T_contra) -> None:
        """
        Send an item to the stream.

        If the buffer is full, this method blocks until there is again room in the
        buffer or the item can be sent directly to a receiver.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end

        """
        await checkpoint()
        try:
            self.send_nowait(item)
        except WouldBlock:
            # Wait until there's someone on the receiving end
            send_event = Event()
            self._state.waiting_senders[send_event] = item
            try:
                await send_event.wait()
            except BaseException:
                # Cancelled (or otherwise interrupted): withdraw the queued item
                self._state.waiting_senders.pop(send_event, None)
                raise

            if send_event in self._state.waiting_senders:
                # Woken up but the item was never taken: the receiving end
                # must have been closed
                del self._state.waiting_senders[send_event]
                raise BrokenResourceError from None

    def clone(self) -> MemoryObjectSendStream[T_contra]:
        """
        Create a clone of this send stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the sending end of the memory stream be considered closed by the receiving ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectSendStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_send_channels -= 1
            if self._state.open_send_channels == 0:
                # Wake all pending receivers; since waiting_receivers is
                # cleared, their "item" is never set and receive() will raise
                # EndOfStream
                receive_events = list(self._state.waiting_receivers.keys())
                self._state.waiting_receivers.clear()
                for event in receive_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectSendStream[T_contra]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        # Warn about streams that were garbage collected without being closed
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
                ResourceWarning,
                source=self,
            )
|
||||
@@ -0,0 +1,141 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Callable, Mapping, Sequence
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Generic, TypeVar
|
||||
|
||||
from ..abc import (
|
||||
ByteReceiveStream,
|
||||
ByteSendStream,
|
||||
ByteStream,
|
||||
Listener,
|
||||
ObjectReceiveStream,
|
||||
ObjectSendStream,
|
||||
ObjectStream,
|
||||
TaskGroup,
|
||||
)
|
||||
|
||||
T_Item = TypeVar("T_Item")
|
||||
T_Stream = TypeVar("T_Stream")
|
||||
|
||||
|
||||
@dataclass(eq=False)
class StapledByteStream(ByteStream):
    """
    Combines two byte streams into a single, bidirectional byte stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ByteSendStream send_stream: the sending byte stream
    :param ByteReceiveStream receive_stream: the receiving byte stream
    """

    send_stream: ByteSendStream
    receive_stream: ByteReceiveStream

    async def receive(self, max_bytes: int = 65536) -> bytes:
        # Reads go entirely through the receiving half
        return await self.receive_stream.receive(max_bytes)

    async def send(self, item: bytes) -> None:
        # Writes go entirely through the sending half
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        # Closing the sending half signals EOF to the peer
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        await self.send_stream.aclose()
        await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Receive stream attributes win on conflicting keys
        combined = dict(self.send_stream.extra_attributes)
        combined.update(self.receive_stream.extra_attributes)
        return combined
|
||||
|
||||
|
||||
@dataclass(eq=False)
class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
    """
    Combines two object streams into a single, bidirectional object stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ObjectSendStream send_stream: the sending object stream
    :param ObjectReceiveStream receive_stream: the receiving object stream
    """

    send_stream: ObjectSendStream[T_Item]
    receive_stream: ObjectReceiveStream[T_Item]

    async def receive(self) -> T_Item:
        # Reads go entirely through the receiving half
        return await self.receive_stream.receive()

    async def send(self, item: T_Item) -> None:
        # Writes go entirely through the sending half
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        # Closing the sending half signals EOF to the peer
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        await self.send_stream.aclose()
        await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Receive stream attributes win on conflicting keys
        combined = dict(self.send_stream.extra_attributes)
        combined.update(self.receive_stream.extra_attributes)
        return combined
|
||||
|
||||
|
||||
@dataclass(eq=False)
class MultiListener(Generic[T_Stream], Listener[T_Stream]):
    """
    Combines multiple listeners into one, serving connections from all of them at once.

    Any MultiListeners in the given collection of listeners will have their listeners
    moved into this one.

    Extra attributes are provided from each listener, with each successive listener
    overriding any conflicting attributes from the previous one.

    :param listeners: listeners to serve
    :type listeners: Sequence[Listener[T_Stream]]
    """

    listeners: Sequence[Listener[T_Stream]]

    def __post_init__(self) -> None:
        # Flatten nested MultiListeners into this one
        flattened: list[Listener[T_Stream]] = []
        for candidate in self.listeners:
            if isinstance(candidate, MultiListener):
                flattened.extend(candidate.listeners)
                # Strip the nested instance's references so it no longer
                # owns the listeners it held
                del candidate.listeners[:]  # type: ignore[attr-defined]
            else:
                flattened.append(candidate)

        self.listeners = flattened

    async def serve(
        self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
    ) -> None:
        from .. import create_task_group

        # Serve every wrapped listener concurrently, forwarding the optional
        # task group to each one
        async with create_task_group() as tg:
            for member in self.listeners:
                tg.start_soon(member.serve, handler, task_group)

    async def aclose(self) -> None:
        for member in self.listeners:
            await member.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Later listeners override earlier ones on conflicting keys
        merged: dict = {}
        for member in self.listeners:
            merged.update(member.extra_attributes)

        return merged
|
||||
@@ -0,0 +1,147 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import codecs
|
||||
from collections.abc import Callable, Mapping
|
||||
from dataclasses import InitVar, dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from ..abc import (
|
||||
AnyByteReceiveStream,
|
||||
AnyByteSendStream,
|
||||
AnyByteStream,
|
||||
ObjectReceiveStream,
|
||||
ObjectSendStream,
|
||||
ObjectStream,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(eq=False)
class TextReceiveStream(ObjectReceiveStream[str]):
    """
    Stream wrapper that decodes bytes to strings using the given encoding.

    Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any
    completely received unicode characters as soon as they come in.

    :param transport_stream: any bytes-based receive stream
    :param encoding: character encoding to use for decoding bytes to strings (defaults
        to ``utf-8``)
    :param errors: handling scheme for decoding errors (defaults to ``strict``; see the
        `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteReceiveStream
    encoding: InitVar[str] = "utf-8"
    errors: InitVar[str] = "strict"
    _decoder: codecs.IncrementalDecoder = field(init=False)

    def __post_init__(self, encoding: str, errors: str) -> None:
        # An incremental decoder carries partial multibyte sequences over
        # from one chunk to the next
        make_decoder = codecs.getincrementaldecoder(encoding)
        self._decoder = make_decoder(errors=errors)

    async def receive(self) -> str:
        # A chunk may end mid-character, in which case the decoder yields ""
        # and we keep reading until at least one full character is available
        while True:
            raw = await self.transport_stream.receive()
            if text := self._decoder.decode(raw):
                return text

    async def aclose(self) -> None:
        await self.transport_stream.aclose()
        self._decoder.reset()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes
|
||||
|
||||
|
||||
@dataclass(eq=False)
class TextSendStream(ObjectSendStream[str]):
    """
    Sends strings to the wrapped stream as bytes using the given encoding.

    :param AnyByteSendStream transport_stream: any bytes-based send stream
    :param str encoding: character encoding to use for encoding strings to bytes
        (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteSendStream
    encoding: InitVar[str] = "utf-8"
    # Unlike "encoding", this is a regular field: it is passed to the encoder
    # on every send() call rather than baked in at construction time
    errors: str = "strict"
    _encoder: Callable[..., tuple[bytes, int]] = field(init=False)

    def __post_init__(self, encoding: str) -> None:
        # Stateless encoder function looked up once for the given encoding
        self._encoder = codecs.getencoder(encoding)

    async def send(self, item: str) -> None:
        """
        Encode the given string and send the resulting bytes.

        :param item: the string to send

        """
        # codecs encoders return (encoded_bytes, length_consumed)
        encoded = self._encoder(item, self.errors)[0]
        await self.transport_stream.send(encoded)

    async def aclose(self) -> None:
        await self.transport_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes
|
||||
|
||||
|
||||
@dataclass(eq=False)
class TextStream(ObjectStream[str]):
    """
    A bidirectional stream that decodes bytes to strings on receive and encodes strings
    to bytes on send.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param AnyByteStream transport_stream: any bytes-based stream
    :param str encoding: character encoding to use for encoding/decoding strings to/from
        bytes (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteStream
    encoding: InitVar[str] = "utf-8"
    errors: InitVar[str] = "strict"
    _receive_stream: TextReceiveStream = field(init=False)
    _send_stream: TextSendStream = field(init=False)

    def __post_init__(self, encoding: str, errors: str) -> None:
        # Wrap each direction of the same transport in its own text adapter
        self._send_stream = TextSendStream(
            self.transport_stream, encoding=encoding, errors=errors
        )
        self._receive_stream = TextReceiveStream(
            self.transport_stream, encoding=encoding, errors=errors
        )

    async def receive(self) -> str:
        return await self._receive_stream.receive()

    async def send(self, item: str) -> None:
        await self._send_stream.send(item)

    async def send_eof(self) -> None:
        # EOF is signalled directly at the transport level
        await self.transport_stream.send_eof()

    async def aclose(self) -> None:
        await self._send_stream.aclose()
        await self._receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Receive-side attributes take precedence on conflicting keys
        merged = dict(self._send_stream.extra_attributes)
        merged.update(self._receive_stream.extra_attributes)
        return merged
|
||||
@@ -0,0 +1,352 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import ssl
|
||||
import sys
|
||||
from collections.abc import Callable, Mapping
|
||||
from dataclasses import dataclass
|
||||
from functools import wraps
|
||||
from typing import Any, TypeVar
|
||||
|
||||
from .. import (
|
||||
BrokenResourceError,
|
||||
EndOfStream,
|
||||
aclose_forcefully,
|
||||
get_cancelled_exc_class,
|
||||
to_thread,
|
||||
)
|
||||
from .._core._typedattr import TypedAttributeSet, typed_attribute
|
||||
from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
PosArgsT = TypeVarTuple("PosArgsT")
|
||||
_PCTRTT = tuple[tuple[str, str], ...]
|
||||
_PCTRTTT = tuple[_PCTRTT, ...]
|
||||
|
||||
|
||||
class TLSAttribute(TypedAttributeSet):
    """Contains Transport Layer Security related attributes."""

    #: the selected ALPN protocol
    alpn_protocol: str | None = typed_attribute()
    #: the channel binding for type ``tls-unique``
    channel_binding_tls_unique: bytes = typed_attribute()
    #: the selected cipher
    cipher: tuple[str, str, int] = typed_attribute()
    #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`
    #: for more information)
    peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()
    #: the peer certificate in binary form
    peer_certificate_binary: bytes | None = typed_attribute()
    #: ``True`` if this is the server side of the connection
    server_side: bool = typed_attribute()
    #: ciphers shared by the client during the TLS handshake (``None`` if this is the
    #: client side)
    shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()
    #: the :class:`~ssl.SSLObject` used for encryption
    ssl_object: ssl.SSLObject = typed_attribute()
    #: ``True`` if this stream does (and expects) a closing TLS handshake when the
    #: stream is being closed
    standard_compatible: bool = typed_attribute()
    #: the TLS protocol version (e.g. ``TLSv1.2``)
    tls_version: str = typed_attribute()
|
||||
|
||||
|
||||
@dataclass(eq=False)
|
||||
class TLSStream(ByteStream):
|
||||
"""
|
||||
A stream wrapper that encrypts all sent data and decrypts received data.
|
||||
|
||||
This class has no public initializer; use :meth:`wrap` instead.
|
||||
All extra attributes from :class:`~TLSAttribute` are supported.
|
||||
|
||||
:var AnyByteStream transport_stream: the wrapped stream
|
||||
|
||||
"""
|
||||
|
||||
transport_stream: AnyByteStream
|
||||
standard_compatible: bool
|
||||
_ssl_object: ssl.SSLObject
|
||||
_read_bio: ssl.MemoryBIO
|
||||
_write_bio: ssl.MemoryBIO
|
||||
|
||||
@classmethod
async def wrap(
    cls,
    transport_stream: AnyByteStream,
    *,
    server_side: bool | None = None,
    hostname: str | None = None,
    ssl_context: ssl.SSLContext | None = None,
    standard_compatible: bool = True,
) -> TLSStream:
    """
    Wrap an existing stream with Transport Layer Security.

    This performs a TLS handshake with the peer.

    :param transport_stream: a bytes-transporting stream to wrap
    :param server_side: ``True`` if this is the server side of the connection,
        ``False`` if this is the client side (if omitted, will be set to ``False``
        if ``hostname`` has been provided, ``True`` otherwise). Used only to create
        a default context when an explicit context has not been provided.
    :param hostname: host name of the peer (if host name checking is desired)
    :param ssl_context: the SSLContext object to use (if not provided, a secure
        default will be created)
    :param standard_compatible: if ``False``, skip the closing handshake when
        closing the connection, and don't raise an exception if the peer does the
        same
    :raises ~ssl.SSLError: if the TLS handshake fails

    """
    if server_side is None:
        # A hostname is normally only supplied on the client side
        server_side = not hostname

    if not ssl_context:
        purpose = (
            ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH
        )
        ssl_context = ssl.create_default_context(purpose)

        # Re-enable detection of unexpected EOFs if it was disabled by Python
        if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
            ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF

    # Memory BIOs decouple the SSL machinery from the actual transport
    bio_in = ssl.MemoryBIO()
    bio_out = ssl.MemoryBIO()

    # External SSLContext implementations may do blocking I/O in wrap_bio(),
    # but the standard library implementation won't
    if type(ssl_context) is ssl.SSLContext:
        ssl_object = ssl_context.wrap_bio(
            bio_in, bio_out, server_side=server_side, server_hostname=hostname
        )
    else:
        ssl_object = await to_thread.run_sync(
            ssl_context.wrap_bio,
            bio_in,
            bio_out,
            server_side,
            hostname,
            None,
        )

    wrapper = cls(
        transport_stream=transport_stream,
        standard_compatible=standard_compatible,
        _ssl_object=ssl_object,
        _read_bio=bio_in,
        _write_bio=bio_out,
    )
    await wrapper._call_sslobject_method(ssl_object.do_handshake)
    return wrapper
|
||||
|
||||
async def _call_sslobject_method(
    self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a method on the wrapped :class:`ssl.SSLObject`, pumping data between
    the memory BIOs and the transport stream until the call completes.

    :param func: the SSLObject method to call
    :param args: positional arguments to pass to the method
    :return: whatever ``func`` returns
    :raises ~anyio.BrokenResourceError: on transport errors or (in standard
        compatible mode) an unexpected EOF from the peer
    :raises ~anyio.EndOfStream: on an unexpected EOF when not in standard
        compatible mode

    """
    while True:
        try:
            result = func(*args)
        except ssl.SSLWantReadError:
            # The SSL machinery needs more input data from the peer
            try:
                # Flush any pending writes first
                if self._write_bio.pending:
                    await self.transport_stream.send(self._write_bio.read())

                data = await self.transport_stream.receive()
            except EndOfStream:
                # Signal EOF to the SSL layer; it decides whether this is
                # acceptable at this point of the protocol
                self._read_bio.write_eof()
            except OSError as exc:
                self._read_bio.write_eof()
                self._write_bio.write_eof()
                raise BrokenResourceError from exc
            else:
                self._read_bio.write(data)
        except ssl.SSLWantWriteError:
            # The SSL machinery has output that must be sent before it can proceed
            await self.transport_stream.send(self._write_bio.read())
        except ssl.SSLSyscallError as exc:
            self._read_bio.write_eof()
            self._write_bio.write_eof()
            raise BrokenResourceError from exc
        except ssl.SSLError as exc:
            self._read_bio.write_eof()
            self._write_bio.write_eof()
            # A ragged EOF (connection cut without a closing handshake) is an
            # error in standard compatible mode, a normal EOF otherwise
            if isinstance(exc, ssl.SSLEOFError) or (
                exc.strerror and "UNEXPECTED_EOF_WHILE_READING" in exc.strerror
            ):
                if self.standard_compatible:
                    raise BrokenResourceError from exc
                else:
                    raise EndOfStream from None

            raise
        else:
            # Flush any pending writes first
            if self._write_bio.pending:
                await self.transport_stream.send(self._write_bio.read())

            return result
|
||||
|
||||
async def unwrap(self) -> tuple[AnyByteStream, bytes]:
    """
    Does the TLS closing handshake.

    :return: a tuple of (wrapped byte stream, bytes left in the read buffer)

    """
    await self._call_sslobject_method(self._ssl_object.unwrap)

    # Seal both BIOs; no further TLS traffic can flow after unwrapping
    for bio in (self._read_bio, self._write_bio):
        bio.write_eof()

    leftover = self._read_bio.read()
    return self.transport_stream, leftover
|
||||
|
||||
async def aclose(self) -> None:
    """Close the stream, performing the TLS closing handshake first if required."""
    if not self.standard_compatible:
        await self.transport_stream.aclose()
        return

    try:
        await self.unwrap()
    except BaseException:
        # The closing handshake failed; tear the transport down immediately
        await aclose_forcefully(self.transport_stream)
        raise

    await self.transport_stream.aclose()
|
||||
|
||||
async def receive(self, max_bytes: int = 65536) -> bytes:
    """Read and decrypt up to ``max_bytes`` bytes from the TLS stream."""
    chunk = await self._call_sslobject_method(self._ssl_object.read, max_bytes)
    if chunk:
        return chunk

    # A zero-length read from the SSL object indicates end of stream
    raise EndOfStream
|
||||
|
||||
async def send(self, item: bytes) -> None:
    """Encrypt ``item`` and send the resulting ciphertext over the transport."""
    write_method = self._ssl_object.write
    await self._call_sslobject_method(write_method, item)
|
||||
|
||||
async def send_eof(self) -> None:
    """Attempt to send a half-close; unconditionally unimplemented for now."""
    tls_version = self.extra(TLSAttribute.tls_version)
    if (version_match := re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version)) is not None:
        version_tuple = (
            int(version_match.group(1)),
            int(version_match.group(2) or 0),
        )
        if version_tuple < (1, 3):
            raise NotImplementedError(
                f"send_eof() requires at least TLSv1.3; current "
                f"session uses {tls_version}"
            )

    raise NotImplementedError(
        "send_eof() has not yet been implemented for TLS streams"
    )
|
||||
|
||||
@property
def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
    """Expose TLS session details on top of the transport's own attributes."""
    # Start from the transport's attributes, then layer the TLS-specific ones
    attributes: dict[Any, Callable[[], Any]] = dict(
        self.transport_stream.extra_attributes
    )
    attributes[TLSAttribute.alpn_protocol] = self._ssl_object.selected_alpn_protocol
    attributes[TLSAttribute.channel_binding_tls_unique] = (
        self._ssl_object.get_channel_binding
    )
    attributes[TLSAttribute.cipher] = self._ssl_object.cipher
    attributes[TLSAttribute.peer_certificate] = lambda: self._ssl_object.getpeercert(
        False
    )
    attributes[TLSAttribute.peer_certificate_binary] = (
        lambda: self._ssl_object.getpeercert(True)
    )
    attributes[TLSAttribute.server_side] = lambda: self._ssl_object.server_side
    attributes[TLSAttribute.shared_ciphers] = (
        lambda: self._ssl_object.shared_ciphers()
        if self._ssl_object.server_side
        else None
    )
    attributes[TLSAttribute.standard_compatible] = lambda: self.standard_compatible
    attributes[TLSAttribute.ssl_object] = lambda: self._ssl_object
    attributes[TLSAttribute.tls_version] = self._ssl_object.version
    return attributes
|
||||
|
||||
|
||||
@dataclass(eq=False)
class TLSListener(Listener[TLSStream]):
    """
    A convenience listener that wraps another listener and auto-negotiates a TLS session
    on every accepted connection.

    If the TLS handshake times out or raises an exception,
    :meth:`handle_handshake_error` is called to do whatever post-mortem processing is
    deemed necessary.

    Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute.

    :param Listener listener: the listener to wrap
    :param ssl_context: the SSL context object
    :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap`
    :param handshake_timeout: time limit for the TLS handshake
        (passed to :func:`~anyio.fail_after`)
    """

    listener: Listener[Any]
    ssl_context: ssl.SSLContext
    standard_compatible: bool = True
    handshake_timeout: float = 30

    @staticmethod
    async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None:
        """
        Handle an exception raised during the TLS handshake.

        This method does 3 things:

        #. Forcefully closes the original stream
        #. Logs the exception (unless it was a cancellation exception) using the
           ``anyio.streams.tls`` logger
        #. Reraises the exception if it was a base exception or a cancellation exception

        :param exc: the exception
        :param stream: the original stream

        """
        await aclose_forcefully(stream)

        # Log all except cancellation exceptions
        if not isinstance(exc, get_cancelled_exc_class()):
            # CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using
            # any asyncio implementation, so we explicitly pass the exception to log
            # (https://github.com/python/cpython/issues/108668). Trio does not have this
            # issue because it works around the CPython bug.
            logging.getLogger(__name__).exception(
                "Error during TLS handshake", exc_info=exc
            )

        # Only reraise base exceptions and cancellation exceptions
        if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()):
            raise

    async def serve(
        self,
        handler: Callable[[TLSStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        """Serve incoming connections, TLS-wrapping each one before calling *handler*."""

        # Each accepted stream is handshaked (under a timeout) before the user's
        # handler ever sees it; handshake failures are routed to
        # handle_handshake_error() instead of the handler
        @wraps(handler)
        async def handler_wrapper(stream: AnyByteStream) -> None:
            # NOTE(review): imported locally, presumably to avoid an import
            # cycle at module load time — confirm
            from .. import fail_after

            try:
                with fail_after(self.handshake_timeout):
                    wrapped_stream = await TLSStream.wrap(
                        stream,
                        ssl_context=self.ssl_context,
                        standard_compatible=self.standard_compatible,
                    )
            except BaseException as exc:
                await self.handle_handshake_error(exc, stream)
            else:
                await handler(wrapped_stream)

        await self.listener.serve(handler_wrapper, task_group)

    async def aclose(self) -> None:
        """Close the wrapped listener."""
        await self.listener.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        # Only the standard_compatible flag is exposed at the listener level
        return {
            TLSAttribute.standard_compatible: lambda: self.standard_compatible,
        }
|
||||
@@ -0,0 +1,218 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import atexit
|
||||
import os
|
||||
import pickle
|
||||
import sys
|
||||
from collections import deque
|
||||
from collections.abc import Callable
|
||||
from textwrap import dedent
|
||||
from typing import Any, Final, TypeVar
|
||||
|
||||
from . import current_time, to_thread
|
||||
from ._core._exceptions import BrokenWorkerIntepreter
|
||||
from ._core._synchronization import CapacityLimiter
|
||||
from .lowlevel import RunVar
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
# "Unbound items" mode passed to _interpqueues.put(); the value 2 was copied
# from the stdlib's own subinterpreter pool (exact semantics unclear — see the
# original author's note: it was simply what the stdlib used)
UNBOUND: Final = 2
# Wire formats for items passed through the interpreter queue
FMT_UNPICKLED: Final = 0
FMT_PICKLED: Final = 1
DEFAULT_CPU_COUNT: Final = 8  # this is just an arbitrarily selected value
MAX_WORKER_IDLE_TIME = (
    30  # seconds a subinterpreter can be idle before becoming eligible for pruning
)

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

# Per-event-loop pool of idle subinterpreter workers, and the lazily-created
# default capacity limiter
_idle_workers = RunVar[deque["Worker"]]("_available_workers")
_default_interpreter_limiter = RunVar[CapacityLimiter]("_default_interpreter_limiter")
|
||||
|
||||
|
||||
class Worker:
    # Source executed inside the target subinterpreter for every call: it pops
    # the (func, args) payload from the queue, runs it, and pushes back
    # (retval, is_exception) — unpickled when shareable, pickled otherwise.
    _run_func = compile(
        dedent("""
        import _interpqueues as queues
        import _interpreters as interpreters
        from pickle import loads, dumps, HIGHEST_PROTOCOL

        item = queues.get(queue_id)[0]
        try:
            func, args = loads(item)
            retval = func(*args)
        except BaseException as exc:
            is_exception = True
            retval = exc
        else:
            is_exception = False

        try:
            queues.put(queue_id, (retval, is_exception), FMT_UNPICKLED, UNBOUND)
        except interpreters.NotShareableError:
            retval = dumps(retval, HIGHEST_PROTOCOL)
            queues.put(queue_id, (retval, is_exception), FMT_PICKLED, UNBOUND)
        """),
        "<string>",
        "exec",
    )

    # Event-loop timestamp of when this worker last finished a call; consulted
    # by the idle-worker pruning logic in run_sync()
    last_used: float = 0

    # True once initialize() has created the subinterpreter and its queue
    _initialized: bool = False
    _interpreter_id: int
    _queue_id: int

    def initialize(self) -> None:
        """Create the subinterpreter and the queue used to exchange payloads."""
        import _interpqueues as queues
        import _interpreters as interpreters

        self._interpreter_id = interpreters.create()
        self._queue_id = queues.create(2, FMT_UNPICKLED, UNBOUND)
        self._initialized = True
        # Expose the queue id and the wire-format constants in the
        # subinterpreter's __main__ namespace so _run_func can reference them
        interpreters.set___main___attrs(
            self._interpreter_id,
            {
                "queue_id": self._queue_id,
                "FMT_PICKLED": FMT_PICKLED,
                "FMT_UNPICKLED": FMT_UNPICKLED,
                "UNBOUND": UNBOUND,
            },
        )

    def destroy(self) -> None:
        """Destroy the subinterpreter and its queue (no-op if never initialized)."""
        import _interpqueues as queues
        import _interpreters as interpreters

        if self._initialized:
            interpreters.destroy(self._interpreter_id)
            queues.destroy(self._queue_id)

    def _call(
        self,
        func: Callable[..., T_Retval],
        args: tuple[Any, ...],
    ) -> tuple[Any, bool]:
        """
        Run ``func(*args)`` in the subinterpreter, blocking until it finishes.

        :return: a tuple of (return value or raised exception, is_exception flag)
        :raises BrokenWorkerIntepreter: if executing the runner code itself failed

        """
        import _interpqueues as queues
        import _interpreters as interpreters

        # Lazily create the subinterpreter on first use
        if not self._initialized:
            self.initialize()

        payload = pickle.dumps((func, args), pickle.HIGHEST_PROTOCOL)
        queues.put(self._queue_id, payload, FMT_PICKLED, UNBOUND)

        res: Any
        is_exception: bool
        # exec() returns exception info if the runner code itself failed to run
        if exc_info := interpreters.exec(self._interpreter_id, self._run_func):
            raise BrokenWorkerIntepreter(exc_info)

        (res, is_exception), fmt = queues.get(self._queue_id)[:2]
        if fmt == FMT_PICKLED:
            res = pickle.loads(res)

        return res, is_exception

    async def call(
        self,
        func: Callable[..., T_Retval],
        args: tuple[Any, ...],
        limiter: CapacityLimiter,
    ) -> T_Retval:
        """Run ``func`` in this worker via a thread, re-raising any exception it raised."""
        result, is_exception = await to_thread.run_sync(
            self._call,
            func,
            args,
            limiter=limiter,
        )
        if is_exception:
            raise result

        return result
|
||||
|
||||
|
||||
def _stop_workers(workers: deque[Worker]) -> None:
|
||||
for worker in workers:
|
||||
worker.destroy()
|
||||
|
||||
workers.clear()
|
||||
|
||||
|
||||
async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a subinterpreter.

    If the task waiting for its completion is cancelled, the call will still run
    its course but its return value (or any raised exception) will be ignored.

    .. warning:: This feature is **experimental**. The upstream interpreter API has not
        yet been finalized or thoroughly tested, so don't rely on this for anything
        mission critical.

    :param func: a callable
    :param args: positional arguments for the callable
    :param limiter: capacity limiter to use to limit the total amount of subinterpreters
        running (if omitted, the default limiter is used)
    :return: the result of the call
    :raises BrokenWorkerIntepreter: if there's an internal error in a subinterpreter

    """
    # NOTE: on 3.13.x, sys.version_info is a 5-tuple and compares *greater* than
    # (3, 13), so this rejects only Python < 3.13
    if sys.version_info <= (3, 13):
        raise RuntimeError("subinterpreters require at least Python 3.13")

    if limiter is None:
        limiter = current_default_interpreter_limiter()

    try:
        idle_workers = _idle_workers.get()
    except LookupError:
        # First call on this event loop: create the pool and make sure all
        # workers are destroyed at interpreter exit
        idle_workers = deque()
        _idle_workers.set(idle_workers)
        atexit.register(_stop_workers, idle_workers)

    async with limiter:
        # Reuse the most recently used idle worker, or spawn a fresh one
        try:
            worker = idle_workers.pop()
        except IndexError:
            worker = Worker()

        try:
            return await worker.call(func, args, limiter)
        finally:
            # Prune workers that have been idle for too long
            now = current_time()
            while idle_workers:
                if now - idle_workers[0].last_used <= MAX_WORKER_IDLE_TIME:
                    break

                await to_thread.run_sync(idle_workers.popleft().destroy, limiter=limiter)

            # Return this worker to the pool, stamped with the current time
            worker.last_used = current_time()
            idle_workers.append(worker)
|
||||
|
||||
|
||||
def current_default_interpreter_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of
    concurrently running subinterpreters.

    Defaults to the number of CPU cores.

    :return: a capacity limiter object

    """
    try:
        limiter = _default_interpreter_limiter.get()
    except LookupError:
        # Lazily create and register the limiter on first use in this event loop
        limiter = CapacityLimiter(os.cpu_count() or DEFAULT_CPU_COUNT)
        _default_interpreter_limiter.set(limiter)

    return limiter
|
||||
@@ -0,0 +1,258 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import pickle
|
||||
import subprocess
|
||||
import sys
|
||||
from collections import deque
|
||||
from collections.abc import Callable
|
||||
from importlib.util import module_from_spec, spec_from_file_location
|
||||
from typing import TypeVar, cast
|
||||
|
||||
from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
|
||||
from ._core._exceptions import BrokenWorkerProcess
|
||||
from ._core._subprocesses import open_process
|
||||
from ._core._synchronization import CapacityLimiter
|
||||
from ._core._tasks import CancelScope, fail_after
|
||||
from .abc import ByteReceiveStream, ByteSendStream, Process
|
||||
from .lowlevel import RunVar, checkpoint_if_cancelled
|
||||
from .streams.buffered import BufferedByteReceiveStream
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
# Seconds a worker process may sit idle before being killed during pruning
WORKER_MAX_IDLE_TIME = 300  # 5 minutes

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

# Per-event-loop bookkeeping: all live worker processes, the idle ones (paired
# with the time they became idle), and the lazily-created default limiter
_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
    "_process_pool_idle_workers"
)
_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")
|
||||
|
||||
|
||||
async def run_sync(  # type: ignore[return]
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    cancellable: bool = False,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker process.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the worker process running it will be abruptly terminated using SIGKILL
    (or ``terminateProcess()`` on Windows).

    :param func: a callable
    :param args: positional arguments for the callable
    :param cancellable: ``True`` to allow cancellation of the operation while it's
        running
    :param limiter: capacity limiter to use to limit the total amount of processes
        running (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """

    async def send_raw_command(pickled_cmd: bytes) -> object:
        # Send one pickled command to the worker and read back a response of
        # the form b"<STATUS> <length>\n" followed by <length> pickled bytes.
        # Any failure in this exchange poisons the worker, which is killed.
        try:
            await stdin.send(pickled_cmd)
            # 50 bytes is ample for the status line
            response = await buffered.receive_until(b"\n", 50)
            status, length = response.split(b" ")
            if status not in (b"RETURN", b"EXCEPTION"):
                raise RuntimeError(
                    f"Worker process returned unexpected response: {response!r}"
                )

            pickled_response = await buffered.receive_exactly(int(length))
        except BaseException as exc:
            # The worker is in an unknown state; remove and kill it
            workers.discard(process)
            try:
                process.kill()
                with CancelScope(shield=True):
                    await process.aclose()
            except ProcessLookupError:
                pass

            # Propagate cancellation as-is; wrap everything else
            if isinstance(exc, get_cancelled_exc_class()):
                raise
            else:
                raise BrokenWorkerProcess from exc

        retval = pickle.loads(pickled_response)
        if status == b"EXCEPTION":
            assert isinstance(retval, BaseException)
            raise retval
        else:
            return retval

    # First pickle the request before trying to reserve a worker process
    await checkpoint_if_cancelled()
    request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)

    # If this is the first run in this event loop thread, set up the necessary variables
    try:
        workers = _process_pool_workers.get()
        idle_workers = _process_pool_idle_workers.get()
    except LookupError:
        workers = set()
        idle_workers = deque()
        _process_pool_workers.set(workers)
        _process_pool_idle_workers.set(idle_workers)
        get_async_backend().setup_process_pool_exit_at_shutdown(workers)

    async with limiter or current_default_process_limiter():
        # Pop processes from the pool (starting from the most recently used) until we
        # find one that hasn't exited yet
        process: Process
        while idle_workers:
            process, idle_since = idle_workers.pop()
            if process.returncode is None:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )

                # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
                # seconds or longer
                now = current_time()
                killed_processes: list[Process] = []
                while idle_workers:
                    if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
                        break

                    process_to_kill, idle_since = idle_workers.popleft()
                    process_to_kill.kill()
                    workers.remove(process_to_kill)
                    killed_processes.append(process_to_kill)

                # Shielded so cancellation cannot leave zombie processes behind
                with CancelScope(shield=True):
                    for killed_process in killed_processes:
                        await killed_process.aclose()

                break

            # The pooled process already exited; drop it and keep looking
            workers.remove(process)
        else:
            # No usable idle worker: spawn a new one running this module
            command = [sys.executable, "-u", "-m", __name__]
            process = await open_process(
                command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
            )
            try:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )
                # Wait (bounded) for the worker's readiness handshake
                with fail_after(20):
                    message = await buffered.receive(6)

                if message != b"READY\n":
                    raise BrokenWorkerProcess(
                        f"Worker process returned unexpected response: {message!r}"
                    )

                # Initialize the worker with our sys.path and main module so
                # pickled callables can be resolved on the other side
                main_module_path = getattr(sys.modules["__main__"], "__file__", None)
                pickled = pickle.dumps(
                    ("init", sys.path, main_module_path),
                    protocol=pickle.HIGHEST_PROTOCOL,
                )
                await send_raw_command(pickled)
            except (BrokenWorkerProcess, get_cancelled_exc_class()):
                raise
            except BaseException as exc:
                process.kill()
                raise BrokenWorkerProcess(
                    "Error during worker process initialization"
                ) from exc

            workers.add(process)

        # Shield unless the caller opted into cancellation (which kills the worker)
        with CancelScope(shield=not cancellable):
            try:
                return cast(T_Retval, await send_raw_command(request))
            finally:
                # Return the worker to the idle pool unless it was discarded
                if process in workers:
                    idle_workers.append((process, current_time()))
|
||||
|
||||
|
||||
def current_default_process_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of worker
    processes.

    :return: a capacity limiter object

    """
    try:
        limiter = _default_process_limiter.get()
    except LookupError:
        # Lazily create and register the limiter on first use in this event loop
        limiter = CapacityLimiter(os.cpu_count() or 2)
        _default_process_limiter.set(limiter)

    return limiter
|
||||
|
||||
|
||||
def process_worker() -> None:
    """
    Entry point of a worker process: read pickled ("init", ...)/("run", ...)
    commands from stdin until EOF, writing pickled results back to stdout.
    """
    # Redirect standard streams to os.devnull so that user code won't interfere with the
    # parent-worker communication
    stdin = sys.stdin
    stdout = sys.stdout
    sys.stdin = open(os.devnull)
    sys.stdout = open(os.devnull, "w")

    # Tell the parent we are ready to receive commands
    stdout.buffer.write(b"READY\n")
    while True:
        retval = exception = None
        try:
            command, *args = pickle.load(stdin.buffer)
        except EOFError:
            # The parent closed the pipe; exit cleanly
            return
        except BaseException as exc:
            exception = exc
        else:
            if command == "run":
                # Execute the requested callable, capturing any exception
                func, args = args
                try:
                    retval = func(*args)
                except BaseException as exc:
                    exception = exc
            elif command == "init":
                # Adopt the parent's sys.path and (if available) its main module
                main_module_path: str | None
                sys.path, main_module_path = args
                del sys.modules["__main__"]
                if main_module_path and os.path.isfile(main_module_path):
                    # Load the parent's main module but as __mp_main__ instead of
                    # __main__ (like multiprocessing does) to avoid infinite recursion
                    try:
                        spec = spec_from_file_location("__mp_main__", main_module_path)
                        if spec and spec.loader:
                            main = module_from_spec(spec)
                            spec.loader.exec_module(main)
                            sys.modules["__main__"] = main
                    except BaseException as exc:
                        exception = exc
        try:
            if exception is not None:
                status = b"EXCEPTION"
                pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
            else:
                status = b"RETURN"
                pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
        except BaseException as exc:
            # The result itself could not be pickled; report that failure instead
            exception = exc
            status = b"EXCEPTION"
            pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)

        # Response format: b"<STATUS> <length>\n" followed by the pickled payload
        stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
        stdout.buffer.write(pickled)

        # Respect SIGTERM
        if isinstance(exception, SystemExit):
            raise exception
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # This module is launched with ``python -u -m <module>`` by the parent to
    # start a worker process
    process_worker()
|
||||
@@ -0,0 +1,69 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from collections.abc import Callable
|
||||
from typing import TypeVar
|
||||
from warnings import warn
|
||||
|
||||
from ._core._eventloop import get_async_backend
|
||||
from .abc import CapacityLimiter
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import TypeVarTuple, Unpack
|
||||
else:
|
||||
from typing_extensions import TypeVarTuple, Unpack
|
||||
|
||||
T_Retval = TypeVar("T_Retval")
|
||||
PosArgsT = TypeVarTuple("PosArgsT")
|
||||
|
||||
|
||||
async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    abandon_on_cancel: bool = False,
    cancellable: bool | None = None,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker thread.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the thread will still run its course but its return value (or any raised
    exception) will be ignored.

    :param func: a callable
    :param args: positional arguments for the callable
    :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
        unchecked on own) if the host task is cancelled, ``False`` to ignore
        cancellations in the host task until the operation has completed in the worker
        thread
    :param cancellable: deprecated alias of ``abandon_on_cancel``; will override
        ``abandon_on_cancel`` if both parameters are passed
    :param limiter: capacity limiter to use to limit the total amount of threads running
        (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """
    # Honor the deprecated alias (it takes precedence), but warn about it
    if cancellable is not None:
        abandon_on_cancel = cancellable
        warn(
            "The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
            "deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
            DeprecationWarning,
            stacklevel=2,
        )

    backend = get_async_backend()
    return await backend.run_sync_in_worker_thread(
        func,
        args,
        abandon_on_cancel=abandon_on_cancel,
        limiter=limiter,
    )
|
||||
|
||||
|
||||
def current_default_thread_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of
    concurrent threads.

    :return: a capacity limiter object

    """
    backend = get_async_backend()
    return backend.current_default_thread_limiter()
|
||||
@@ -0,0 +1 @@
|
||||
pip
|
||||
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 litl, LLC.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
@@ -0,0 +1,419 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: backoff
|
||||
Version: 2.2.1
|
||||
Summary: Function decoration for backoff and retry
|
||||
Home-page: https://github.com/litl/backoff
|
||||
License: MIT
|
||||
Keywords: retry,backoff,decorators
|
||||
Author: Bob Green
|
||||
Author-email: rgreen@aquent.com
|
||||
Requires-Python: >=3.7,<4.0
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: License :: OSI Approved :: MIT License
|
||||
Classifier: Natural Language :: English
|
||||
Classifier: Operating System :: OS Independent
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.7
|
||||
Classifier: Programming Language :: Python :: 3.8
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Topic :: Internet :: WWW/HTTP
|
||||
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
||||
Classifier: Topic :: Utilities
|
||||
Project-URL: Repository, https://github.com/litl/backoff
|
||||
Description-Content-Type: text/x-rst
|
||||
|
||||
backoff
|
||||
=======
|
||||
|
||||
.. image:: https://travis-ci.org/litl/backoff.svg
|
||||
:target: https://travis-ci.org/litl/backoff
|
||||
.. image:: https://coveralls.io/repos/litl/backoff/badge.svg
|
||||
:target: https://coveralls.io/r/litl/backoff?branch=python-3
|
||||
.. image:: https://github.com/litl/backoff/workflows/CodeQL/badge.svg
|
||||
:target: https://github.com/litl/backoff/actions/workflows/codeql-analysis.yml
|
||||
.. image:: https://img.shields.io/pypi/v/backoff.svg
|
||||
:target: https://pypi.python.org/pypi/backoff
|
||||
.. image:: https://img.shields.io/github/license/litl/backoff
|
||||
:target: https://github.com/litl/backoff/blob/master/LICENSE
|
||||
|
||||
**Function decoration for backoff and retry**
|
||||
|
||||
This module provides function decorators which can be used to wrap a
|
||||
function such that it will be retried until some condition is met. It
|
||||
is meant to be of use when accessing unreliable resources with the
|
||||
potential for intermittent failures, e.g. network resources and external
|
||||
APIs. Somewhat more generally, it may also be of use for dynamically
|
||||
polling resources for externally generated content.
|
||||
|
||||
Decorators support both regular functions for synchronous code and
|
||||
`asyncio <https://docs.python.org/3/library/asyncio.html>`__'s coroutines
|
||||
for asynchronous code.
|
||||
|
||||
Examples
|
||||
========
|
||||
|
||||
Since Kenneth Reitz's `requests <http://python-requests.org>`_ module
|
||||
has become a de facto standard for synchronous HTTP clients in Python,
|
||||
networking examples below are written using it, but it is in no way required
|
||||
by the backoff module.
|
||||
|
||||
@backoff.on_exception
|
||||
---------------------
|
||||
|
||||
The ``on_exception`` decorator is used to retry when a specified exception
|
||||
is raised. Here's an example using exponential backoff when any
|
||||
``requests`` exception is raised:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException)
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
The decorator will also accept a tuple of exceptions for cases where
|
||||
the same backoff behavior is desired for more than one exception type:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
(requests.exceptions.Timeout,
|
||||
requests.exceptions.ConnectionError))
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
**Give Up Conditions**
|
||||
|
||||
Optional keyword arguments can specify conditions under which to give
|
||||
up.
|
||||
|
||||
The keyword argument ``max_time`` specifies the maximum amount
|
||||
of total time in seconds that can elapse before giving up.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
max_time=60)
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
|
||||
Keyword argument ``max_tries`` specifies the maximum number of calls
|
||||
to make to the target function before giving up.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
max_tries=8,
|
||||
jitter=None)
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
|
||||
In some cases the raised exception instance itself may need to be
|
||||
inspected in order to determine if it is a retryable condition. The
|
||||
``giveup`` keyword arg can be used to specify a function which accepts
|
||||
the exception and returns a truthy value if the exception should not
|
||||
be retried:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def fatal_code(e):
|
||||
return 400 <= e.response.status_code < 500
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
max_time=300,
|
||||
giveup=fatal_code)
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
By default, when a give up event occurs, the exception in question is reraised
|
||||
and so code calling an `on_exception`-decorated function may still
|
||||
need to do exception handling. This behavior can optionally be disabled
|
||||
using the `raise_on_giveup` keyword argument.
|
||||
|
||||
In the code below, `requests.exceptions.RequestException` will not be raised
|
||||
when giveup occurs. Note that the decorated function will return `None` in this
|
||||
case, regardless of the logic in the `on_exception` handler.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def fatal_code(e):
|
||||
return 400 <= e.response.status_code < 500
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
max_time=300,
|
||||
raise_on_giveup=False,
|
||||
giveup=fatal_code)
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
This is useful for non-mission critical code where you still wish to retry
|
||||
the code inside of `backoff.on_exception` but wish to proceed with execution
|
||||
even if all retries fail.
|
||||
|
||||
@backoff.on_predicate
|
||||
---------------------
|
||||
|
||||
The ``on_predicate`` decorator is used to retry when a particular
|
||||
condition is true of the return value of the target function. This may
|
||||
be useful when polling a resource for externally generated content.
|
||||
|
||||
Here's an example which uses a fibonacci sequence backoff when the
|
||||
return value of the target function is the empty list:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_predicate(backoff.fibo, lambda x: x == [], max_value=13)
|
||||
def poll_for_messages(queue):
|
||||
return queue.get()
|
||||
|
||||
Extra keyword arguments are passed when initializing the
|
||||
wait generator, so the ``max_value`` param above is passed as a keyword
|
||||
arg when initializing the fibo generator.
|
||||
|
||||
When not specified, the predicate param defaults to the falsey test,
|
||||
so the above can more concisely be written:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_predicate(backoff.fibo, max_value=13)
|
||||
def poll_for_message(queue):
|
||||
return queue.get()
|
||||
|
||||
More simply, a function which continues polling every second until it
|
||||
gets a non-falsey result could be defined like this:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_predicate(backoff.constant, jitter=None, interval=1)
|
||||
def poll_for_message(queue):
|
||||
return queue.get()
|
||||
|
||||
The jitter is disabled in order to keep the polling frequency fixed.
|
||||
|
||||
@backoff.runtime
|
||||
----------------
|
||||
|
||||
You can also use the ``backoff.runtime`` generator to make use of the
|
||||
return value or thrown exception of the decorated method.
|
||||
|
||||
For example, to use the value in the ``Retry-After`` header of the response:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_predicate(
|
||||
backoff.runtime,
|
||||
predicate=lambda r: r.status_code == 429,
|
||||
value=lambda r: int(r.headers.get("Retry-After")),
|
||||
jitter=None,
|
||||
)
|
||||
def get_url():
|
||||
return requests.get(url)
|
||||
|
||||
Jitter
|
||||
------
|
||||
|
||||
A jitter algorithm can be supplied with the ``jitter`` keyword arg to
|
||||
either of the backoff decorators. This argument should be a function
|
||||
accepting the original unadulterated backoff value and returning its
|
||||
jittered counterpart.
|
||||
|
||||
As of version 1.2, the default jitter function ``backoff.full_jitter``
|
||||
implements the 'Full Jitter' algorithm as defined in the AWS
|
||||
Architecture Blog's `Exponential Backoff And Jitter
|
||||
<https://www.awsarchitectureblog.com/2015/03/backoff.html>`_ post.
|
||||
Note that with this algorithm, the time yielded by the wait generator
|
||||
is actually the *maximum* amount of time to wait.
|
||||
|
||||
Previous versions of backoff defaulted to adding some random number of
|
||||
milliseconds (up to 1s) to the raw sleep value. If desired, this
|
||||
behavior is now available as ``backoff.random_jitter``.
|
||||
|
||||
Using multiple decorators
|
||||
-------------------------
|
||||
|
||||
The backoff decorators may also be combined to specify different
|
||||
backoff behavior for different cases:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_predicate(backoff.fibo, max_value=13)
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.HTTPError,
|
||||
max_time=60)
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.Timeout,
|
||||
max_time=300)
|
||||
def poll_for_message(queue):
|
||||
return queue.get()
|
||||
|
||||
|
||||
Runtime Configuration
|
||||
---------------------
|
||||
|
||||
The decorator functions ``on_exception`` and ``on_predicate`` are
|
||||
generally evaluated at import time. This is fine when the keyword args
|
||||
are passed as constant values, but suppose we want to consult a
|
||||
dictionary with configuration options that only become available at
|
||||
runtime. The relevant values are not available at import time. Instead,
|
||||
decorator functions can be passed callables which are evaluated at
|
||||
runtime to obtain the value:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def lookup_max_time():
|
||||
# pretend we have a global reference to 'app' here
|
||||
# and that it has a dictionary-like 'config' property
|
||||
return app.config["BACKOFF_MAX_TIME"]
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
ValueError,
|
||||
max_time=lookup_max_time)
|
||||
|
||||
Event handlers
|
||||
--------------
|
||||
|
||||
Both backoff decorators optionally accept event handler functions
|
||||
using the keyword arguments ``on_success``, ``on_backoff``, and ``on_giveup``.
|
||||
This may be useful in reporting statistics or performing other custom
|
||||
logging.
|
||||
|
||||
Handlers must be callables with a unary signature accepting a dict
|
||||
argument. This dict contains the details of the invocation. Valid keys
|
||||
include:
|
||||
|
||||
* *target*: reference to the function or method being invoked
|
||||
* *args*: positional arguments to func
|
||||
* *kwargs*: keyword arguments to func
|
||||
* *tries*: number of invocation tries so far
|
||||
* *elapsed*: elapsed time in seconds so far
|
||||
* *wait*: seconds to wait (``on_backoff`` handler only)
|
||||
* *value*: value triggering backoff (``on_predicate`` decorator only)
|
||||
|
||||
A handler which prints the details of the backoff event could be
|
||||
implemented like so:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def backoff_hdlr(details):
|
||||
print ("Backing off {wait:0.1f} seconds after {tries} tries "
|
||||
"calling function {target} with args {args} and kwargs "
|
||||
"{kwargs}".format(**details))
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
on_backoff=backoff_hdlr)
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
**Multiple handlers per event type**
|
||||
|
||||
In all cases, iterables of handler functions are also accepted, which
|
||||
are called in turn. For example, you might provide a simple list of
|
||||
handler functions as the value of the ``on_backoff`` keyword arg:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
on_backoff=[backoff_hdlr1, backoff_hdlr2])
|
||||
def get_url(url):
|
||||
return requests.get(url)
|
||||
|
||||
**Getting exception info**
|
||||
|
||||
In the case of the ``on_exception`` decorator, all ``on_backoff`` and
|
||||
``on_giveup`` handlers are called from within the except block for the
|
||||
exception being handled. Therefore exception info is available to the
|
||||
handler functions via the python standard library, specifically
|
||||
``sys.exc_info()`` or the ``traceback`` module. The exception is also
|
||||
available at the *exception* key in the `details` dict passed to the
|
||||
handlers.
|
||||
|
||||
Asynchronous code
|
||||
-----------------
|
||||
|
||||
Backoff supports asynchronous execution in Python 3.5 and above.
|
||||
|
||||
To use backoff in asynchronous code based on
|
||||
`asyncio <https://docs.python.org/3/library/asyncio.html>`__
|
||||
you simply need to apply ``backoff.on_exception`` or ``backoff.on_predicate``
|
||||
to coroutines.
|
||||
You can also use coroutines for the ``on_success``, ``on_backoff``, and
|
||||
``on_giveup`` event handlers, with the interface otherwise being identical.
|
||||
|
||||
The following examples use `aiohttp <https://aiohttp.readthedocs.io/>`__
|
||||
asynchronous HTTP client/server library.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_exception(backoff.expo, aiohttp.ClientError, max_time=60)
|
||||
async def get_url(url):
|
||||
async with aiohttp.ClientSession(raise_for_status=True) as session:
|
||||
async with session.get(url) as response:
|
||||
return await response.text()
|
||||
|
||||
Logging configuration
|
||||
---------------------
|
||||
|
||||
By default, backoff and retry attempts are logged to the 'backoff'
|
||||
logger. By default, this logger is configured with a NullHandler, so
|
||||
there will be nothing output unless you configure a handler.
|
||||
Programmatically, this might be accomplished with something as simple
|
||||
as:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
logging.getLogger('backoff').addHandler(logging.StreamHandler())
|
||||
|
||||
The default logging level is INFO, which corresponds to logging
|
||||
anytime a retry event occurs. If you would instead like to log
|
||||
only when a giveup event occurs, set the logger level to ERROR.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
logging.getLogger('backoff').setLevel(logging.ERROR)
|
||||
|
||||
It is also possible to specify an alternate logger with the ``logger``
|
||||
keyword argument. If a string value is specified the logger will be
|
||||
looked up by name.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
logger='my_logger')
|
||||
# ...
|
||||
|
||||
It is also supported to specify a Logger (or LoggerAdapter) object
|
||||
directly.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
my_logger = logging.getLogger('my_logger')
|
||||
my_handler = logging.StreamHandler()
|
||||
my_logger.addHandler(my_handler)
|
||||
my_logger.setLevel(logging.ERROR)
|
||||
|
||||
@backoff.on_exception(backoff.expo,
|
||||
requests.exceptions.RequestException,
|
||||
logger=my_logger)
|
||||
# ...
|
||||
|
||||
Default logging can be disabled all together by specifying
|
||||
``logger=None``. In this case, if desired alternative logging behavior
|
||||
could be defined by using custom event handlers.
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
backoff-2.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
backoff-2.2.1.dist-info/LICENSE,sha256=KmtNX4hNTXob8E6n3xlEzxKzLjWnmobQoHWi0_QPuaw,1077
|
||||
backoff-2.2.1.dist-info/METADATA,sha256=Wgffksy-dcDJ4GaoqXyjc8XxrE0DQz3FbWwmrDqo-6U,14827
|
||||
backoff-2.2.1.dist-info/RECORD,,
|
||||
backoff-2.2.1.dist-info/WHEEL,sha256=gSF7fibx4crkLz_A-IKR6kcuq0jJ64KNCkG8_bcaEao,88
|
||||
backoff/__init__.py,sha256=Jl49Ur_5GTiySyaw8URBXlfClWn0H7Pk5P95m1awNZ8,898
|
||||
backoff/__pycache__/__init__.cpython-311.pyc,,
|
||||
backoff/__pycache__/_async.cpython-311.pyc,,
|
||||
backoff/__pycache__/_common.cpython-311.pyc,,
|
||||
backoff/__pycache__/_decorator.cpython-311.pyc,,
|
||||
backoff/__pycache__/_jitter.cpython-311.pyc,,
|
||||
backoff/__pycache__/_sync.cpython-311.pyc,,
|
||||
backoff/__pycache__/_typing.cpython-311.pyc,,
|
||||
backoff/__pycache__/_wait_gen.cpython-311.pyc,,
|
||||
backoff/__pycache__/types.cpython-311.pyc,,
|
||||
backoff/_async.py,sha256=ZvqmfxxQ2o-UjQUQin12Ojc4eXOXb43RWSQaPaqbALI,6775
|
||||
backoff/_common.py,sha256=8s3_5AJH8hiHd9GR2PdKqiaeE2sdEUoyf6cW9OCo1F8,3478
|
||||
backoff/_decorator.py,sha256=EuYHrg8rSPaKJ_KeZ99WEg9knrfyn_-ck12Cwfcb68U,9804
|
||||
backoff/_jitter.py,sha256=LjJShpjryk9sWBCWiz-3UX1DJCx6rebNJ5Bf3nPMlYQ,782
|
||||
backoff/_sync.py,sha256=DT_ktufPPb0nut9WCAKe5UE7sTYRl9f93PRPX6jP8ro,4214
|
||||
backoff/_typing.py,sha256=RrJ50kqdeNZvSmoMjNoAUpMCm44V8qreAVgGb-KMl-g,1328
|
||||
backoff/_wait_gen.py,sha256=U5AR3Isf4aZs2SC0x9PGI3Wh8JR7XGkV90SiU56wWvw,2396
|
||||
backoff/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
backoff/types.py,sha256=4DGG6Ltcz0wVfXrk0YBOnp_oPpcki4c0BOnodRhgoqg,73
|
||||
@@ -0,0 +1,4 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: poetry-core 1.2.0
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
@@ -0,0 +1,30 @@
|
||||
# coding:utf-8
"""
Function decoration for backoff and retry

This module provides function decorators which can be used to wrap a
function such that it will be retried until some condition is met. It
is meant to be of use when accessing unreliable resources with the
potential for intermittent failures i.e. network resources and external
APIs. Somewhat more generally, it may also be of use for dynamically
polling resources for externally generated content.

For examples and full documentation see the README at
https://github.com/litl/backoff
"""
from backoff._decorator import on_exception, on_predicate
from backoff._jitter import full_jitter, random_jitter
from backoff._wait_gen import constant, expo, fibo, runtime

# Public API re-exported at the package top level: the two decorators,
# the wait generators, and the jitter functions.
__all__ = [
    'on_predicate',
    'on_exception',
    'constant',
    'expo',
    'fibo',
    'runtime',
    'full_jitter',
    'random_jitter',
]

# Package version; kept in sync with the distribution metadata.
__version__ = "2.2.1"
|
||||
@@ -0,0 +1,188 @@
|
||||
# coding:utf-8
|
||||
import datetime
|
||||
import functools
|
||||
import asyncio
|
||||
from datetime import timedelta
|
||||
|
||||
from backoff._common import (_init_wait_gen, _maybe_call, _next_wait)
|
||||
|
||||
|
||||
def _ensure_coroutine(coro_or_func):
|
||||
if asyncio.iscoroutinefunction(coro_or_func):
|
||||
return coro_or_func
|
||||
else:
|
||||
@functools.wraps(coro_or_func)
|
||||
async def f(*args, **kwargs):
|
||||
return coro_or_func(*args, **kwargs)
|
||||
return f
|
||||
|
||||
|
||||
def _ensure_coroutines(coros_or_funcs):
|
||||
return [_ensure_coroutine(f) for f in coros_or_funcs]
|
||||
|
||||
|
||||
async def _call_handlers(handlers,
|
||||
*,
|
||||
target, args, kwargs, tries, elapsed,
|
||||
**extra):
|
||||
details = {
|
||||
'target': target,
|
||||
'args': args,
|
||||
'kwargs': kwargs,
|
||||
'tries': tries,
|
||||
'elapsed': elapsed,
|
||||
}
|
||||
details.update(extra)
|
||||
for handler in handlers:
|
||||
await handler(details)
|
||||
|
||||
|
||||
def retry_predicate(target, wait_gen, predicate,
                    *,
                    max_tries, max_time, jitter,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap coroutine *target* in a retry loop driven by *predicate*.

    The wrapper awaits *target* and, while ``predicate(result)`` is
    truthy, backs off using waits drawn from *wait_gen* — until
    *max_tries* or *max_time* is exhausted (either may be a callable
    evaluated at call time). Returns the last result either way.
    """
    # Event handlers may be plain callables; normalize them to coroutines.
    on_success = _ensure_coroutines(on_success)
    on_backoff = _ensure_coroutines(on_backoff)
    on_giveup = _ensure_coroutines(on_giveup)

    # Easy to implement, please report if you need this.
    assert not asyncio.iscoroutinefunction(max_tries)
    assert not asyncio.iscoroutinefunction(jitter)

    # The async retry loop only makes sense around a coroutine function.
    assert asyncio.iscoroutinefunction(target)

    @functools.wraps(target)
    async def retry(*args, **kwargs):

        # update variables from outer function args
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            # Base details passed to every handler for this attempt.
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }

            ret = await target(*args, **kwargs)
            if predicate(ret):
                # Result is retryable: give up if a limit was hit,
                # otherwise back off and try again.
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)

                if max_tries_exceeded or max_time_exceeded:
                    await _call_handlers(on_giveup, **details, value=ret)
                    break

                try:
                    seconds = _next_wait(wait, ret, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: treated as a giveup event.
                    await _call_handlers(on_giveup, **details, value=ret)
                    break

                await _call_handlers(on_backoff, **details, value=ret,
                                     wait=seconds)

                # Note: there is no convenient way to pass explicit event
                # loop to decorator, so here we assume that either default
                # thread event loop is set and correct (it mostly is
                # by default), or Python >= 3.5.3 or Python >= 3.6 is used
                # where loop.get_event_loop() in coroutine guaranteed to
                # return correct value.
                # See for details:
                # <https://groups.google.com/forum/#!topic/python-tulip/yF9C-rFpiKk>
                # <https://bugs.python.org/issue28613>
                await asyncio.sleep(seconds)
                continue
            else:
                # Acceptable result: report success and stop retrying.
                await _call_handlers(on_success, **details, value=ret)
                break

        return ret

    return retry
|
||||
|
||||
|
||||
def retry_exception(target, wait_gen, exception,
                    *,
                    max_tries, max_time, jitter, giveup,
                    on_success, on_backoff, on_giveup, raise_on_giveup,
                    wait_gen_kwargs):
    """Wrap coroutine *target* in a retry loop triggered by *exception*.

    The wrapper awaits *target* and retries whenever one of the
    *exception* types is raised, unless ``giveup(exc)`` is truthy or
    *max_tries*/*max_time* (each optionally a callable evaluated at
    call time) is exhausted. On giveup the exception is re-raised
    unless *raise_on_giveup* is false, in which case None is returned.
    """
    # Handlers and the giveup predicate may be plain callables;
    # normalize them all to coroutines.
    on_success = _ensure_coroutines(on_success)
    on_backoff = _ensure_coroutines(on_backoff)
    on_giveup = _ensure_coroutines(on_giveup)
    giveup = _ensure_coroutine(giveup)

    # Easy to implement, please report if you need this.
    assert not asyncio.iscoroutinefunction(max_tries)
    assert not asyncio.iscoroutinefunction(jitter)

    @functools.wraps(target)
    async def retry(*args, **kwargs):

        # Callable limits are re-evaluated on every invocation.
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            # Base details passed to every handler for this attempt.
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }

            try:
                ret = await target(*args, **kwargs)
            except exception as e:
                giveup_result = await giveup(e)
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)

                if giveup_result or max_tries_exceeded or max_time_exceeded:
                    await _call_handlers(on_giveup, **details, exception=e)
                    if raise_on_giveup:
                        # Bare raise re-raises the active exception with
                        # its original traceback.
                        raise
                    return None

                try:
                    seconds = _next_wait(wait, e, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: give up and re-raise.
                    await _call_handlers(on_giveup, **details, exception=e)
                    raise e

                await _call_handlers(on_backoff, **details, wait=seconds,
                                     exception=e)

                # Note: there is no convenient way to pass explicit event
                # loop to decorator, so here we assume that either default
                # thread event loop is set and correct (it mostly is
                # by default), or Python >= 3.5.3 or Python >= 3.6 is used
                # where loop.get_event_loop() in coroutine guaranteed to
                # return correct value.
                # See for details:
                # <https://groups.google.com/forum/#!topic/python-tulip/yF9C-rFpiKk>
                # <https://bugs.python.org/issue28613>
                await asyncio.sleep(seconds)
            else:
                await _call_handlers(on_success, **details)

                return ret
    return retry
|
||||
@@ -0,0 +1,120 @@
|
||||
# coding:utf-8
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
|
||||
|
||||
# Use module-specific logger with a default null handler.
# Library logging convention: attaching a NullHandler means importing the
# package never produces "no handler" warnings; applications opt in to
# output by adding their own handler to the 'backoff' logger.
_logger = logging.getLogger('backoff')
_logger.addHandler(logging.NullHandler())  # pragma: no cover
_logger.setLevel(logging.INFO)
|
||||
|
||||
|
||||
# Evaluate arg that can be either a fixed value or a callable.
|
||||
def _maybe_call(f, *args, **kwargs):
|
||||
if callable(f):
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except TypeError:
|
||||
return f
|
||||
else:
|
||||
return f
|
||||
|
||||
|
||||
def _init_wait_gen(wait_gen, wait_gen_kwargs):
|
||||
kwargs = {k: _maybe_call(v) for k, v in wait_gen_kwargs.items()}
|
||||
initialized = wait_gen(**kwargs)
|
||||
initialized.send(None) # Initialize with an empty send
|
||||
return initialized
|
||||
|
||||
|
||||
def _next_wait(wait, send_value, jitter, elapsed, max_time):
|
||||
value = wait.send(send_value)
|
||||
try:
|
||||
if jitter is not None:
|
||||
seconds = jitter(value)
|
||||
else:
|
||||
seconds = value
|
||||
except TypeError:
|
||||
warnings.warn(
|
||||
"Nullary jitter function signature is deprecated. Use "
|
||||
"unary signature accepting a wait value in seconds and "
|
||||
"returning a jittered version of it.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
seconds = value + jitter()
|
||||
|
||||
# don't sleep longer than remaining allotted max_time
|
||||
if max_time is not None:
|
||||
seconds = min(seconds, max_time - elapsed)
|
||||
|
||||
return seconds
|
||||
|
||||
|
||||
def _prepare_logger(logger):
|
||||
if isinstance(logger, str):
|
||||
logger = logging.getLogger(logger)
|
||||
return logger
|
||||
|
||||
|
||||
# Configure handler list with user specified handler and optionally
|
||||
# with a default handler bound to the specified logger.
|
||||
def _config_handlers(
|
||||
user_handlers, *, default_handler=None, logger=None, log_level=None
|
||||
):
|
||||
handlers = []
|
||||
if logger is not None:
|
||||
assert log_level is not None, "Log level is not specified"
|
||||
# bind the specified logger to the default log handler
|
||||
log_handler = functools.partial(
|
||||
default_handler, logger=logger, log_level=log_level
|
||||
)
|
||||
handlers.append(log_handler)
|
||||
|
||||
if user_handlers is None:
|
||||
return handlers
|
||||
|
||||
# user specified handlers can either be an iterable of handlers
|
||||
# or a single handler. either way append them to the list.
|
||||
if hasattr(user_handlers, '__iter__'):
|
||||
# add all handlers in the iterable
|
||||
handlers += list(user_handlers)
|
||||
else:
|
||||
# append a single handler
|
||||
handlers.append(user_handlers)
|
||||
|
||||
return handlers
|
||||
|
||||
|
||||
# Default backoff handler
|
||||
def _log_backoff(details, logger, log_level):
|
||||
msg = "Backing off %s(...) for %.1fs (%s)"
|
||||
log_args = [details['target'].__name__, details['wait']]
|
||||
|
||||
exc_typ, exc, _ = sys.exc_info()
|
||||
if exc is not None:
|
||||
exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1]
|
||||
log_args.append(exc_fmt.rstrip("\n"))
|
||||
else:
|
||||
log_args.append(details['value'])
|
||||
logger.log(log_level, msg, *log_args)
|
||||
|
||||
|
||||
# Default giveup handler
|
||||
def _log_giveup(details, logger, log_level):
|
||||
msg = "Giving up %s(...) after %d tries (%s)"
|
||||
log_args = [details['target'].__name__, details['tries']]
|
||||
|
||||
exc_typ, exc, _ = sys.exc_info()
|
||||
if exc is not None:
|
||||
exc_fmt = traceback.format_exception_only(exc_typ, exc)[-1]
|
||||
log_args.append(exc_fmt.rstrip("\n"))
|
||||
else:
|
||||
log_args.append(details['value'])
|
||||
|
||||
logger.log(log_level, msg, *log_args)
|
||||
@@ -0,0 +1,222 @@
|
||||
# coding:utf-8
|
||||
import asyncio
|
||||
import logging
|
||||
import operator
|
||||
from typing import Any, Callable, Iterable, Optional, Type, Union
|
||||
|
||||
from backoff._common import (
|
||||
_prepare_logger,
|
||||
_config_handlers,
|
||||
_log_backoff,
|
||||
_log_giveup
|
||||
)
|
||||
from backoff._jitter import full_jitter
|
||||
from backoff import _async, _sync
|
||||
from backoff._typing import (
|
||||
_CallableT,
|
||||
_Handler,
|
||||
_Jitterer,
|
||||
_MaybeCallable,
|
||||
_MaybeLogger,
|
||||
_MaybeSequence,
|
||||
_Predicate,
|
||||
_WaitGenerator,
|
||||
)
|
||||
|
||||
|
||||
def on_predicate(wait_gen: _WaitGenerator,
                 predicate: _Predicate[Any] = operator.not_,
                 *,
                 max_tries: Optional[_MaybeCallable[int]] = None,
                 max_time: Optional[_MaybeCallable[float]] = None,
                 jitter: Union[_Jitterer, None] = full_jitter,
                 on_success: Union[_Handler, Iterable[_Handler], None] = None,
                 on_backoff: Union[_Handler, Iterable[_Handler], None] = None,
                 on_giveup: Union[_Handler, Iterable[_Handler], None] = None,
                 logger: _MaybeLogger = 'backoff',
                 backoff_log_level: int = logging.INFO,
                 giveup_log_level: int = logging.ERROR,
                 **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
    """Returns decorator for backoff and retry triggered by predicate.

    Args:
        wait_gen: A generator yielding successive wait times in
            seconds.
        predicate: A function which when called on the return value of
            the target function will trigger backoff when considered
            truthily. If not specified, the default behavior is to
            backoff on falsey return values.
        max_tries: The maximum number of attempts to make before giving
            up. In the case of failure, the result of the last attempt
            will be returned. The default value of None means there
            is no limit to the number of tries. If a callable is passed,
            it will be evaluated at runtime and its return value used.
        max_time: The maximum total amount of time to try for before
            giving up. If this time expires, the result of the last
            attempt will be returned. If a callable is passed, it will
            be evaluated at runtime and its return value used.
        jitter: A function of the value yielded by wait_gen returning
            the actual time to wait. This distributes wait times
            stochastically in order to avoid timing collisions across
            concurrent clients. Wait times are jittered by default
            using the full_jitter function. Jittering may be disabled
            altogether by passing jitter=None.
        on_success: Callable (or iterable of callables) with a unary
            signature to be called in the event of success. The
            parameter is a dict containing details about the invocation.
        on_backoff: Callable (or iterable of callables) with a unary
            signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
        on_giveup: Callable (or iterable of callables) with a unary
            signature to be called in the event that max_tries
            is exceeded. The parameter is a dict containing details
            about the invocation.
        logger: Name of logger or Logger object to log to. Defaults to
            'backoff'.
        backoff_log_level: log level for the backoff event. Defaults to "INFO"
        giveup_log_level: log level for the give up event. Defaults to "ERROR"
        **wait_gen_kwargs: Any additional keyword args specified will be
            passed to wait_gen when it is initialized. Any callable
            args will first be evaluated and their return values passed.
            This is useful for runtime configuration.
    """
    def decorate(target):
        # Resolve logger/handler configuration once, at decoration time;
        # nonlocal lets the resolved values replace the raw arguments.
        nonlocal logger, on_success, on_backoff, on_giveup

        logger = _prepare_logger(logger)
        on_success = _config_handlers(on_success)
        # backoff/giveup handler lists get a default logging handler
        # bound to the configured logger and level.
        on_backoff = _config_handlers(
            on_backoff,
            default_handler=_log_backoff,
            logger=logger,
            log_level=backoff_log_level
        )
        on_giveup = _config_handlers(
            on_giveup,
            default_handler=_log_giveup,
            logger=logger,
            log_level=giveup_log_level
        )

        # Choose the async or sync retry loop to match the target.
        if asyncio.iscoroutinefunction(target):
            retry = _async.retry_predicate
        else:
            retry = _sync.retry_predicate

        return retry(
            target,
            wait_gen,
            predicate,
            max_tries=max_tries,
            max_time=max_time,
            jitter=jitter,
            on_success=on_success,
            on_backoff=on_backoff,
            on_giveup=on_giveup,
            wait_gen_kwargs=wait_gen_kwargs
        )

    # Return a function which decorates a target with a retry loop.
    return decorate
|
||||
|
||||
|
||||
def on_exception(wait_gen: _WaitGenerator,
                 exception: _MaybeSequence[Type[Exception]],
                 *,
                 max_tries: Optional[_MaybeCallable[int]] = None,
                 max_time: Optional[_MaybeCallable[float]] = None,
                 jitter: Union[_Jitterer, None] = full_jitter,
                 giveup: _Predicate[Exception] = lambda e: False,
                 on_success: Union[_Handler, Iterable[_Handler], None] = None,
                 on_backoff: Union[_Handler, Iterable[_Handler], None] = None,
                 on_giveup: Union[_Handler, Iterable[_Handler], None] = None,
                 raise_on_giveup: bool = True,
                 logger: _MaybeLogger = 'backoff',
                 backoff_log_level: int = logging.INFO,
                 giveup_log_level: int = logging.ERROR,
                 **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
    """Returns decorator for backoff and retry triggered by exception.

    Args:
        wait_gen: A generator yielding successive wait times in
            seconds.
        exception: An exception type (or tuple of types) which triggers
            backoff.
        max_tries: The maximum number of attempts to make before giving
            up. Once exhausted, the exception will be allowed to escape.
            The default value of None means there is no limit to the
            number of tries. If a callable is passed, it will be
            evaluated at runtime and its return value used.
        max_time: The maximum total amount of time to try for before
            giving up. Once expired, the exception will be allowed to
            escape. If a callable is passed, it will be
            evaluated at runtime and its return value used.
        jitter: A function of the value yielded by wait_gen returning
            the actual time to wait. This distributes wait times
            stochastically in order to avoid timing collisions across
            concurrent clients. Wait times are jittered by default
            using the full_jitter function. Jittering may be disabled
            altogether by passing jitter=None.
        giveup: Function accepting an exception instance and
            returning whether or not to give up. Optional. The default
            is to always continue.
        on_success: Callable (or iterable of callables) with a unary
            signature to be called in the event of success. The
            parameter is a dict containing details about the invocation.
        on_backoff: Callable (or iterable of callables) with a unary
            signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
        on_giveup: Callable (or iterable of callables) with a unary
            signature to be called in the event that max_tries
            is exceeded. The parameter is a dict containing details
            about the invocation.
        raise_on_giveup: Boolean indicating whether the registered exceptions
            should be raised on giveup. Defaults to `True`
        logger: Name or Logger object to log to. Defaults to 'backoff'.
        backoff_log_level: log level for the backoff event. Defaults to "INFO"
        giveup_log_level: log level for the give up event. Defaults to "ERROR"
        **wait_gen_kwargs: Any additional keyword args specified will be
            passed to wait_gen when it is initialized. Any callable
            args will first be evaluated and their return values passed.
            This is useful for runtime configuration.
    """
    def decorate(target):
        # Resolve logger/handler configuration once, at decoration time;
        # nonlocal lets the resolved values replace the raw arguments.
        nonlocal logger, on_success, on_backoff, on_giveup

        logger = _prepare_logger(logger)
        on_success = _config_handlers(on_success)
        # backoff/giveup handler lists get a default logging handler
        # bound to the configured logger and level.
        on_backoff = _config_handlers(
            on_backoff,
            default_handler=_log_backoff,
            logger=logger,
            log_level=backoff_log_level,
        )
        on_giveup = _config_handlers(
            on_giveup,
            default_handler=_log_giveup,
            logger=logger,
            log_level=giveup_log_level,
        )

        # Choose the async or sync retry loop to match the target.
        if asyncio.iscoroutinefunction(target):
            retry = _async.retry_exception
        else:
            retry = _sync.retry_exception

        return retry(
            target,
            wait_gen,
            exception,
            max_tries=max_tries,
            max_time=max_time,
            jitter=jitter,
            giveup=giveup,
            on_success=on_success,
            on_backoff=on_backoff,
            on_giveup=on_giveup,
            raise_on_giveup=raise_on_giveup,
            wait_gen_kwargs=wait_gen_kwargs
        )

    # Return a function which decorates a target with a retry loop.
    return decorate
|
||||
@@ -0,0 +1,28 @@
|
||||
# coding:utf-8
|
||||
|
||||
import random
|
||||
|
||||
|
||||
def random_jitter(value: float) -> float:
    """Add a random offset in [0, 1) seconds to *value*.

    Up to one extra second is tacked onto the original backoff value.
    This was the default jitter strategy before backoff 1.2.

    Args:
        value: The raw, un-jittered backoff value.
    """
    offset = random.random()
    return value + offset
|
||||
|
||||
|
||||
def full_jitter(value: float) -> float:
    """Draw a wait time uniformly from the interval [0, value].

    This is the "Full Jitter" algorithm from the AWS Architecture
    Blog's comparison of jitter strategies.
    (http://www.awsarchitectureblog.com/2015/03/backoff.html)

    Args:
        value: The raw, un-jittered backoff value.
    """
    return random.uniform(0, value)
|
||||
@@ -0,0 +1,132 @@
|
||||
# coding:utf-8
|
||||
import datetime
|
||||
import functools
|
||||
import time
|
||||
from datetime import timedelta
|
||||
|
||||
from backoff._common import (_init_wait_gen, _maybe_call, _next_wait)
|
||||
|
||||
|
||||
def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra):
    """Invoke every event handler with a details dict for this attempt.

    The dict always carries target/args/kwargs/tries/elapsed; event-specific
    keys (e.g. ``wait``, ``value``, ``exception``) arrive via **extra.
    """
    details = {
        "target": target,
        "args": args,
        "kwargs": kwargs,
        "tries": tries,
        "elapsed": elapsed,
        **extra,
    }
    for handler in hdlrs:
        handler(details)
|
||||
|
||||
|
||||
def retry_predicate(target, wait_gen, predicate,
                    *,
                    max_tries, max_time, jitter,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap *target* in a retry loop driven by a predicate on its return.

    The wrapped function is called repeatedly while ``predicate(ret)`` is
    truthy.  On a truthy result the loop either backs off and retries, or —
    once ``max_tries``/``max_time`` is exhausted or the wait generator runs
    dry — fires the ``on_giveup`` handlers and returns the last value anyway.

    Args:
        target: The function to wrap.
        wait_gen: Wait-time generator factory (e.g. expo, fibo, constant).
        predicate: Unary callable; a truthy result means "retry".
        max_tries: Maximum number of calls; may be a callable evaluated at
            call time, or None for no limit.
        max_time: Maximum total seconds; may be a callable, or None.
        jitter: Function mapping a raw wait to the actual wait, or None.
        on_success: Iterable of unary handlers fired on success.
        on_backoff: Iterable of unary handlers fired before each sleep.
        on_giveup: Iterable of unary handlers fired on give-up.
        wait_gen_kwargs: Extra keyword args forwarded to ``wait_gen``.
    """

    @functools.wraps(target)
    def retry(*args, **kwargs):
        # max_tries / max_time may be callables; resolve them once per call.
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            # NOTE: elapsed is measured before this attempt's call, so it
            # reflects time spent up to (not including) the current try.
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }

            ret = target(*args, **kwargs)
            if predicate(ret):
                # Truthy predicate: this attempt "failed"; decide whether to
                # give up or back off and retry.
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)

                if max_tries_exceeded or max_time_exceeded:
                    _call_handlers(on_giveup, **details, value=ret)
                    break

                try:
                    seconds = _next_wait(wait, ret, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: treat as give-up.
                    _call_handlers(on_giveup, **details)
                    break

                _call_handlers(on_backoff, **details,
                               value=ret, wait=seconds)

                time.sleep(seconds)
                continue
            else:
                _call_handlers(on_success, **details, value=ret)
                break

        # Last return value of target, whether accepted or given up on.
        return ret

    return retry
|
||||
|
||||
|
||||
def retry_exception(target, wait_gen, exception,
                    *,
                    max_tries, max_time, jitter, giveup,
                    on_success, on_backoff, on_giveup, raise_on_giveup,
                    wait_gen_kwargs):
    """Wrap *target* in a retry loop triggered by the given exception type(s).

    The wrapped function is called repeatedly; when it raises an instance of
    *exception* the loop backs off and retries.  It gives up when ``giveup(e)``
    is truthy, ``max_tries``/``max_time`` is exhausted, or the wait generator
    runs dry — firing ``on_giveup`` and (if ``raise_on_giveup``) re-raising.

    Args:
        target: The function to wrap.
        wait_gen: Wait-time generator factory (e.g. expo, fibo, constant).
        exception: Exception type (or tuple of types) that triggers a retry.
        max_tries: Maximum number of calls; may be a callable evaluated at
            call time, or None for no limit.
        max_time: Maximum total seconds; may be a callable, or None.
        jitter: Function mapping a raw wait to the actual wait, or None.
        giveup: Unary predicate on the exception; truthy means give up now.
        on_success: Iterable of unary handlers fired on success.
        on_backoff: Iterable of unary handlers fired before each sleep.
        on_giveup: Iterable of unary handlers fired on give-up.
        raise_on_giveup: If False, swallow the exception and return None on
            give-up instead of re-raising.
        wait_gen_kwargs: Extra keyword args forwarded to ``wait_gen``.
    """

    @functools.wraps(target)
    def retry(*args, **kwargs):
        # max_tries / max_time may be callables; resolve them once per call.
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)

        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            # NOTE: elapsed is measured before this attempt's call, so it
            # reflects time spent up to (not including) the current try.
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }

            try:
                ret = target(*args, **kwargs)
            except exception as e:
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)

                if giveup(e) or max_tries_exceeded or max_time_exceeded:
                    _call_handlers(on_giveup, **details, exception=e)
                    if raise_on_giveup:
                        # Bare raise preserves the original traceback.
                        raise
                    return None

                try:
                    seconds = _next_wait(wait, e, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: give up and re-raise.
                    _call_handlers(on_giveup, **details, exception=e)
                    raise e

                _call_handlers(on_backoff, **details, wait=seconds,
                               exception=e)

                time.sleep(seconds)
            else:
                # No exception: success — fire handlers and return below.
                _call_handlers(on_success, **details)

                return ret
    return retry
|
||||
@@ -0,0 +1,44 @@
|
||||
# coding:utf-8
|
||||
import logging
|
||||
import sys
|
||||
from typing import (Any, Callable, Coroutine, Dict, Generator, Sequence, Tuple,
|
||||
TypeVar, Union)
|
||||
|
||||
# Compatibility shim: TypedDict entered the stdlib typing module in 3.8.
if sys.version_info >= (3, 8):  # pragma: no cover
    from typing import TypedDict
else:  # pragma: no cover
    # use typing_extensions if installed but don't require it
    try:
        from typing_extensions import TypedDict
    except ImportError:
        # Last resort: a plain dict subclass that tolerates the keyword
        # arguments (e.g. total=False) used by the class statements below.
        class TypedDict(dict):
            def __init_subclass__(cls, **kwargs: Any) -> None:
                return super().__init_subclass__()


# Keys always present in the details dict passed to event handlers.
class _Details(TypedDict):
    target: Callable[..., Any]
    args: Tuple[Any, ...]
    kwargs: Dict[str, Any]
    tries: int
    elapsed: float


# Optional keys, present only for certain events/decorators.
class Details(_Details, total=False):
    wait: float  # present in the on_backoff handler case for either decorator
    value: Any  # present in the on_predicate decorator case


T = TypeVar("T")

# Internal aliases used throughout the package's annotations.
_CallableT = TypeVar('_CallableT', bound=Callable[..., Any])
_Handler = Union[
    Callable[[Details], None],
    Callable[[Details], Coroutine[Any, Any, None]],
]
_Jitterer = Callable[[float], float]
_MaybeCallable = Union[T, Callable[[], T]]
_MaybeLogger = Union[str, logging.Logger, None]
_MaybeSequence = Union[T, Sequence[T]]
_Predicate = Callable[[T], bool]
_WaitGenerator = Callable[..., Generator[float, None, None]]
|
||||
@@ -0,0 +1,89 @@
|
||||
# coding:utf-8
|
||||
|
||||
import itertools
|
||||
from typing import Any, Callable, Generator, Iterable, Optional, Union
|
||||
|
||||
|
||||
def expo(
    base: float = 2,
    factor: float = 1,
    max_value: Optional[float] = None
) -> Generator[float, Any, None]:
    """Yield an exponentially growing sequence of wait times.

    Each value is ``factor * base ** n`` for n = 0, 1, 2, ...  Once the
    true exponential value reaches *max_value*, *max_value* itself is
    yielded forever after.

    Args:
        base: The mathematical base of the exponentiation operation.
        factor: Factor to multiply the exponentiation by.
        max_value: Upper bound on yielded values, if any.
    """
    # The retry machinery primes the generator with an initial .send(None).
    yield  # type: ignore[misc]
    exponent = 0
    while True:
        candidate = factor * base ** exponent
        if max_value is not None and candidate >= max_value:
            yield max_value
        else:
            yield candidate
            exponent += 1
|
||||
|
||||
|
||||
def fibo(max_value: Optional[int] = None) -> Generator[int, None, None]:
    """Yield wait times following the Fibonacci sequence (1, 1, 2, 3, 5...).

    Once the true Fibonacci value reaches *max_value*, *max_value* itself
    is yielded forever after.

    Args:
        max_value: Upper bound on yielded values, if any.
    """
    # The retry machinery primes the generator with an initial .send(None).
    yield  # type: ignore[misc]

    current, following = 1, 1
    while True:
        if max_value is not None and current >= max_value:
            yield max_value
        else:
            yield current
            current, following = following, current + following
|
||||
|
||||
|
||||
def constant(
    interval: Union[int, Iterable[float]] = 1
) -> Generator[float, None, None]:
    """Yield wait times from a fixed value or an iterable of values.

    A scalar *interval* is repeated forever; an iterable is consumed and
    yielded item by item (the generator ends when it runs out).

    Args:
        interval: A constant value to yield or an iterable of such values.
    """
    # The retry machinery primes the generator with an initial .send(None).
    yield  # type: ignore[misc]

    # EAFP: scalars are not iterable, so fall back to repeating them.
    try:
        source = iter(interval)  # type: ignore
    except TypeError:
        source = itertools.repeat(interval)  # type: ignore

    # Explicit loop (not `yield from`) so that the .send() calls made by
    # the retry loop are absorbed instead of hitting a plain iterator.
    for wait_seconds in source:
        yield wait_seconds
|
||||
|
||||
|
||||
def runtime(
    *,
    value: Callable[[Any], float]
) -> Generator[float, None, None]:
    """Compute each wait from the decorated call's result or exception.

    The retry loop sends the target's return value (or the exception it
    raised) into this generator; *value* maps that object to a wait time.

    Args:
        value: Callable taking the decorated function's return value or
            thrown exception and returning how long to wait.
    """
    # First yield absorbs the priming .send(None) from the retry machinery.
    last = yield  # type: ignore[misc]
    while True:
        last = yield value(last)
|
||||
@@ -0,0 +1,6 @@
|
||||
# coding:utf-8
# Public re-export: `Details` is the dict type passed to event handlers.
from ._typing import Details

__all__ = [
    'Details'
]
|
||||
Binary file not shown.
@@ -0,0 +1,54 @@
|
||||
#!C:\Program Files (x86)\Steam\steamapps\common\Blender\4.5\python\bin\python.exe
|
||||
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
from pprint import pformat
|
||||
|
||||
import jmespath
|
||||
from jmespath import exceptions
|
||||
|
||||
|
||||
def main():
    """Command-line entry point for evaluating a JMESPath expression.

    Reads JSON from ``--filename`` (or stdin when no filename is given),
    applies the expression, and prints the result as indented JSON.
    With ``--ast``, pretty-prints the parsed AST instead of searching.

    Returns:
        Process exit code: 0 on success, 1 on a JMESPath error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('expression')
    parser.add_argument('-f', '--filename',
                        help=('The filename containing the input data. '
                              'If a filename is not given then data is '
                              'read from stdin.'))
    parser.add_argument('--ast', action='store_true',
                        help=('Pretty print the AST, do not search the data.'))
    args = parser.parse_args()
    expression = args.expression
    if args.ast:
        # Only print the AST
        expression = jmespath.compile(args.expression)
        sys.stdout.write(pformat(expression.parsed))
        sys.stdout.write('\n')
        return 0
    if args.filename:
        with open(args.filename, 'r') as f:
            data = json.load(f)
    else:
        data = sys.stdin.read()
        data = json.loads(data)
    try:
        # ensure_ascii=False keeps non-ASCII characters readable in output.
        sys.stdout.write(json.dumps(
            jmespath.search(expression, data), indent=4, ensure_ascii=False))
        sys.stdout.write('\n')
    except exceptions.ArityError as e:
        sys.stderr.write("invalid-arity: %s\n" % e)
        return 1
    except exceptions.JMESPathTypeError as e:
        sys.stderr.write("invalid-type: %s\n" % e)
        return 1
    except exceptions.UnknownFunctionError as e:
        sys.stderr.write("unknown-function: %s\n" % e)
        return 1
    except exceptions.ParseError as e:
        sys.stderr.write("syntax-error: %s\n" % e)
        return 1


if __name__ == '__main__':
    sys.exit(main())
|
||||
Binary file not shown.
@@ -0,0 +1 @@
|
||||
pip
|
||||
@@ -0,0 +1,177 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
@@ -0,0 +1,181 @@
|
||||
Metadata-Version: 2.1
|
||||
Name: boto3
|
||||
Version: 1.39.17
|
||||
Summary: The AWS SDK for Python
|
||||
Home-page: https://github.com/boto/boto3
|
||||
Author: Amazon Web Services
|
||||
License: Apache License 2.0
|
||||
Project-URL: Documentation, https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
|
||||
Project-URL: Source, https://github.com/boto/boto3
|
||||
Classifier: Development Status :: 5 - Production/Stable
|
||||
Classifier: Intended Audience :: Developers
|
||||
Classifier: Natural Language :: English
|
||||
Classifier: License :: OSI Approved :: Apache Software License
|
||||
Classifier: Programming Language :: Python
|
||||
Classifier: Programming Language :: Python :: 3
|
||||
Classifier: Programming Language :: Python :: 3 :: Only
|
||||
Classifier: Programming Language :: Python :: 3.9
|
||||
Classifier: Programming Language :: Python :: 3.10
|
||||
Classifier: Programming Language :: Python :: 3.11
|
||||
Classifier: Programming Language :: Python :: 3.12
|
||||
Classifier: Programming Language :: Python :: 3.13
|
||||
Classifier: Programming Language :: Python :: 3.14
|
||||
Requires-Python: >= 3.9
|
||||
License-File: LICENSE
|
||||
License-File: NOTICE
|
||||
Requires-Dist: botocore (<1.40.0,>=1.39.17)
|
||||
Requires-Dist: jmespath (<2.0.0,>=0.7.1)
|
||||
Requires-Dist: s3transfer (<0.14.0,>=0.13.0)
|
||||
Provides-Extra: crt
|
||||
Requires-Dist: botocore[crt] (<2.0a0,>=1.21.0) ; extra == 'crt'
|
||||
|
||||
===============================
|
||||
Boto3 - The AWS SDK for Python
|
||||
===============================
|
||||
|
||||
|Version| |Python| |License|
|
||||
|
||||
Boto3 is the Amazon Web Services (AWS) Software Development Kit (SDK) for
|
||||
Python, which allows Python developers to write software that makes use
|
||||
of services like Amazon S3 and Amazon EC2. You can find the latest, most
|
||||
up to date, documentation at our `doc site`_, including a list of
|
||||
services that are supported.
|
||||
|
||||
Boto3 is maintained and published by `Amazon Web Services`_.
|
||||
|
||||
Boto (pronounced boh-toh) was named after the fresh water dolphin native to the Amazon river. The name was chosen by the author of the original Boto library, Mitch Garnaat, as a reference to the company.
|
||||
|
||||
Notices
|
||||
-------
|
||||
|
||||
On 2025-04-22, support for Python 3.8 ended for Boto3. This follows the
|
||||
Python Software Foundation `end of support <https://peps.python.org/pep-0569/#lifespan>`__
|
||||
for the runtime which occurred on 2024-10-07.
|
||||
For more information, see this `blog post <https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/>`__.
|
||||
|
||||
.. _boto: https://docs.pythonboto.org/
|
||||
.. _`doc site`: https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
|
||||
.. _`Amazon Web Services`: https://aws.amazon.com/what-is-aws/
|
||||
.. |Python| image:: https://img.shields.io/pypi/pyversions/boto3.svg?style=flat
|
||||
:target: https://pypi.python.org/pypi/boto3/
|
||||
:alt: Python Versions
|
||||
.. |Version| image:: http://img.shields.io/pypi/v/boto3.svg?style=flat
|
||||
:target: https://pypi.python.org/pypi/boto3/
|
||||
:alt: Package Version
|
||||
.. |License| image:: http://img.shields.io/pypi/l/boto3.svg?style=flat
|
||||
:target: https://github.com/boto/boto3/blob/develop/LICENSE
|
||||
:alt: License
|
||||
|
||||
Getting Started
|
||||
---------------
|
||||
Assuming that you have a supported version of Python installed, you can first
|
||||
set up your environment with:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$ python -m venv .venv
|
||||
...
|
||||
$ . .venv/bin/activate
|
||||
|
||||
Then, you can install boto3 from PyPI with:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$ python -m pip install boto3
|
||||
|
||||
or install from source with:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$ git clone https://github.com/boto/boto3.git
|
||||
$ cd boto3
|
||||
$ python -m pip install -r requirements.txt
|
||||
$ python -m pip install -e .
|
||||
|
||||
|
||||
Using Boto3
|
||||
~~~~~~~~~~~~~~
|
||||
After installing boto3, a few configuration steps are required before use.
|
||||
|
||||
Next, set up credentials (in e.g. ``~/.aws/credentials``):
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[default]
|
||||
aws_access_key_id = YOUR_KEY
|
||||
aws_secret_access_key = YOUR_SECRET
|
||||
|
||||
Then, set up a default region (in e.g. ``~/.aws/config``):
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
[default]
|
||||
region=us-east-1
|
||||
|
||||
Other credential configuration methods can be found `here <https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html>`__
|
||||
|
||||
Then, from a Python interpreter:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> import boto3
|
||||
>>> s3 = boto3.resource('s3')
|
||||
>>> for bucket in s3.buckets.all():
|
||||
print(bucket.name)
|
||||
|
||||
Running Tests
|
||||
~~~~~~~~~~~~~
|
||||
You can run tests in all supported Python versions using ``tox``. By default,
|
||||
it will run all of the unit and functional tests, but you can also specify your own
|
||||
``pytest`` options. Note that this requires that you have all supported
|
||||
versions of Python installed, otherwise you must pass ``-e`` or run the
|
||||
``pytest`` command directly:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$ tox
|
||||
$ tox -- unit/test_session.py
|
||||
$ tox -e py26,py33 -- integration/
|
||||
|
||||
You can also run individual tests with your default Python version:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
$ pytest tests/unit
|
||||
|
||||
|
||||
Getting Help
|
||||
------------
|
||||
|
||||
We use GitHub issues for tracking bugs and feature requests and have limited
|
||||
bandwidth to address them. Please use these community resources for getting
|
||||
help:
|
||||
|
||||
* Ask a question on `Stack Overflow <https://stackoverflow.com/>`__ and tag it with `boto3 <https://stackoverflow.com/questions/tagged/boto3>`__
|
||||
* Open a support ticket with `AWS Support <https://console.aws.amazon.com/support/home#/>`__
|
||||
* If it turns out that you may have found a bug, please `open an issue <https://github.com/boto/boto3/issues/new>`__
|
||||
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. Please read through this `CONTRIBUTING <https://github.com/boto/boto3/blob/develop/CONTRIBUTING.rst>`__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution.
|
||||
|
||||
|
||||
Maintenance and Support for SDK Major Versions
|
||||
----------------------------------------------
|
||||
|
||||
Boto3 was made generally available on 06/22/2015 and is currently in the full support phase of the availability life cycle.
|
||||
|
||||
For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide:
|
||||
|
||||
* `AWS SDKs and Tools Maintenance Policy <https://docs.aws.amazon.com/sdkref/latest/guide/maint-policy.html>`__
|
||||
* `AWS SDKs and Tools Version Support Matrix <https://docs.aws.amazon.com/sdkref/latest/guide/version-support-matrix.html>`__
|
||||
|
||||
|
||||
More Resources
|
||||
--------------
|
||||
|
||||
* `NOTICE <https://github.com/boto/boto3/blob/develop/NOTICE>`__
|
||||
* `Changelog <https://github.com/boto/boto3/blob/develop/CHANGELOG.rst>`__
|
||||
* `License <https://github.com/boto/boto3/blob/develop/LICENSE>`__
|
||||
@@ -0,0 +1,2 @@
|
||||
boto3
|
||||
Copyright 2013-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
@@ -0,0 +1,104 @@
|
||||
boto3-1.39.17.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
||||
boto3-1.39.17.dist-info/LICENSE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
|
||||
boto3-1.39.17.dist-info/METADATA,sha256=L0aj-u1_zeTFy9P2tz0qQ4heDfpgqIXnQMFwhpGbdzs,6652
|
||||
boto3-1.39.17.dist-info/NOTICE,sha256=BPseYUhKeBDxugm7QrwByljJrzOSfXxaIVVuTE0cf6Q,83
|
||||
boto3-1.39.17.dist-info/RECORD,,
|
||||
boto3-1.39.17.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
boto3-1.39.17.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
|
||||
boto3-1.39.17.dist-info/top_level.txt,sha256=MP6_SI1GcPseXodd3Ykt5F_mCBsrUksiziLxjEZKGUU,6
|
||||
boto3/__init__.py,sha256=dSgSxEWEkRTmuCl9Av-gaZ9v6wnaKLaNqceHGBAZXQU,3367
|
||||
boto3/__pycache__/__init__.cpython-311.pyc,,
|
||||
boto3/__pycache__/compat.cpython-311.pyc,,
|
||||
boto3/__pycache__/crt.cpython-311.pyc,,
|
||||
boto3/__pycache__/exceptions.cpython-311.pyc,,
|
||||
boto3/__pycache__/session.cpython-311.pyc,,
|
||||
boto3/__pycache__/utils.cpython-311.pyc,,
|
||||
boto3/compat.py,sha256=RAG9ngSS-4mBf0JZONqgYyjEfb3Zy5ewfPGYLn51jcU,3083
|
||||
boto3/crt.py,sha256=VFstUtHMZrZ6eHJJ-YdXb4vqfIOcHbv1l51fdeY5cS0,5407
|
||||
boto3/data/cloudformation/2010-05-15/resources-1.json,sha256=5mFVKJVtbVoHyPdHSyNfZ5mpkgCAws5PhnveSu4qzdI,5110
|
||||
boto3/data/cloudwatch/2010-08-01/resources-1.json,sha256=q4AgE8F4pbscd-2U3NYSGAzK55zpMyOQGr83JUxbZXI,11690
|
||||
boto3/data/dynamodb/2012-08-10/resources-1.json,sha256=hBLa1Jt7bdT557U9A7UcSi8SCpONKzdbtDRTzjM1-Y0,3849
|
||||
boto3/data/ec2/2014-10-01/resources-1.json,sha256=tMG1AMYP2ksnPWY6-3l8DB-EhKsSNtAO9YHhvHqBKu0,68469
|
||||
boto3/data/ec2/2015-03-01/resources-1.json,sha256=tMG1AMYP2ksnPWY6-3l8DB-EhKsSNtAO9YHhvHqBKu0,68469
|
||||
boto3/data/ec2/2015-04-15/resources-1.json,sha256=tMG1AMYP2ksnPWY6-3l8DB-EhKsSNtAO9YHhvHqBKu0,68469
|
||||
boto3/data/ec2/2015-10-01/resources-1.json,sha256=SOfYX2c1KgvnxMO2FCdJpV42rJWNMwVhlFAXhvUPTzA,76564
|
||||
boto3/data/ec2/2016-04-01/resources-1.json,sha256=SOfYX2c1KgvnxMO2FCdJpV42rJWNMwVhlFAXhvUPTzA,76564
|
||||
boto3/data/ec2/2016-09-15/resources-1.json,sha256=SOfYX2c1KgvnxMO2FCdJpV42rJWNMwVhlFAXhvUPTzA,76564
|
||||
boto3/data/ec2/2016-11-15/resources-1.json,sha256=vx7YiL-sUvBFeo4SZ81G7Qa2Hy-y6xY4z2YlSx7_wEw,76922
|
||||
boto3/data/glacier/2012-06-01/resources-1.json,sha256=GT5qWQLGeXtrHgTDNG23Mrpyweg6O0Udgd139BuNTVs,19940
|
||||
boto3/data/iam/2010-05-08/resources-1.json,sha256=PsOT9yBqSJtluBFHCVRsg6k6Ly2VkSYODnYxSl0DVOc,50357
|
||||
boto3/data/opsworks/2013-02-18/resources-1.json,sha256=Y6ygEyegsbYA1gGZn-Ad2yuDd3jUCOt2UKrW_b2YBeM,4136
|
||||
boto3/data/s3/2006-03-01/resources-1.json,sha256=VeKALhMRqv7fyDHMLOM5_RzXUEuDdg_n6OIRi3sdB-o,37204
|
||||
boto3/data/sns/2010-03-31/resources-1.json,sha256=7zmKQhafgsRDu4U1yiw3NXHz-zJhHKrOmtuoYlxQP-s,9091
|
||||
boto3/data/sqs/2012-11-05/resources-1.json,sha256=LRIIr5BId3UDeuBfLn-vRiWsSZCM9_ynqdxF8uzHgy8,6545
|
||||
boto3/docs/__init__.py,sha256=xEUfkpfz3nGn8-siOf_Q1dqPuPGP_WpUGVCtfnJ-XGc,1844
|
||||
boto3/docs/__pycache__/__init__.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/action.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/attr.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/base.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/client.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/collection.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/docstring.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/method.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/resource.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/service.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/subresource.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/utils.cpython-311.pyc,,
|
||||
boto3/docs/__pycache__/waiter.cpython-311.pyc,,
|
||||
boto3/docs/action.py,sha256=mCW9IUvZS1eStA0DrSqD1B_hZBz6YTdrQmbI5d2Jzbo,8122
|
||||
boto3/docs/attr.py,sha256=BnG3tR1KKQvvY58aeJiWQ5W5DiMnJ_9jUjmG6tDbFiU,2500
|
||||
boto3/docs/base.py,sha256=nOrQSCeUSIZPkn-I59o7CfjEthgdkpCt_rXtE9zQnXc,2103
|
||||
boto3/docs/client.py,sha256=HeNMMm0oKClpkzY1yyVT_JbSFkGF92n7Nnv2J5u3bJg,1003
|
||||
boto3/docs/collection.py,sha256=l8x2qW1HHnQsRDbR0yeUnaOGbgo2oAqxDyhyrbf5bes,11296
|
||||
boto3/docs/docstring.py,sha256=oPugaubdAXY6aNa-kXGI51lP1xE2s4AnfTsLhibf7-E,2511
|
||||
boto3/docs/method.py,sha256=MFX6L3SzXoL8Jz1fkuDLZ-OXMMnKuIBI2kkA8-NRvNg,2725
|
||||
boto3/docs/resource.py,sha256=jsFszXfdvnCX7hVxPyARqPU7H4c5D_zfUBi4N8vybZw,15134
|
||||
boto3/docs/service.py,sha256=bCd2LPfZOeTkDOKggTyXJYXXPkuYUy91x5KYyqPPQnE,8544
|
||||
boto3/docs/subresource.py,sha256=WkEA4qmQbrN7Oz9ofypJOPfATXIzjwampAi2m430NbE,5766
|
||||
boto3/docs/utils.py,sha256=H0UeVvmVbYBZ6F-CVEUxVggLMBOIoA5q8y8hxBFnRKE,5436
|
||||
boto3/docs/waiter.py,sha256=EW0DF9XDtbAVzxUZj3kI20fCoTJJnF9ZjaBRrCILBws,5165
|
||||
boto3/dynamodb/__init__.py,sha256=GkSq-WxXWfVHu1SEcMrlJbzkfw9ACgF3UdCL6fPpTmY,562
|
||||
boto3/dynamodb/__pycache__/__init__.cpython-311.pyc,,
|
||||
boto3/dynamodb/__pycache__/conditions.cpython-311.pyc,,
|
||||
boto3/dynamodb/__pycache__/table.cpython-311.pyc,,
|
||||
boto3/dynamodb/__pycache__/transform.cpython-311.pyc,,
|
||||
boto3/dynamodb/__pycache__/types.cpython-311.pyc,,
|
||||
boto3/dynamodb/conditions.py,sha256=sjkd0kIqFP_h8aUvysZQel0zts5HF22ogqKiv0t0KRw,15045
|
||||
boto3/dynamodb/table.py,sha256=ui8oL634pE6UdMiN6Mz50wAjRQkCF1plq9XsbUEgbWw,6340
|
||||
boto3/dynamodb/transform.py,sha256=JnW5ZzPIfxEcDszSvXKUZmp_1rw445tsddS3FG--JwA,12909
|
||||
boto3/dynamodb/types.py,sha256=ch0vIKaAYexjL42S_OJWyvjWMcb0UbNrmkKGcz76O3c,9541
|
||||
boto3/ec2/__init__.py,sha256=GkSq-WxXWfVHu1SEcMrlJbzkfw9ACgF3UdCL6fPpTmY,562
|
||||
boto3/ec2/__pycache__/__init__.cpython-311.pyc,,
|
||||
boto3/ec2/__pycache__/createtags.cpython-311.pyc,,
|
||||
boto3/ec2/__pycache__/deletetags.cpython-311.pyc,,
|
||||
boto3/ec2/createtags.py,sha256=pUPJOYn7m0Jcch9UL-DEVGgbQHoyAemECPBhzyBx28c,1577
|
||||
boto3/ec2/deletetags.py,sha256=KaYcqSt8FFM_TW0g0pZ14qDjVnmRCPV0sMe6DprEtvo,1217
|
||||
boto3/examples/cloudfront.rst,sha256=K-sBWZxoLjABCZHrqAZs57cYefwPmDir03pm6PE_mh4,1390
|
||||
boto3/examples/s3.rst,sha256=a3mbSl7EbNbwd2GKYlP9nXrTHZItZVQRdMG3gamZtSo,5528
|
||||
boto3/exceptions.py,sha256=i13QpGxoFizxAGCzA2qmF9ldbI5IfBpn37DH75ddRF8,4127
|
||||
boto3/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
||||
boto3/resources/__pycache__/__init__.cpython-311.pyc,,
|
||||
boto3/resources/__pycache__/action.cpython-311.pyc,,
|
||||
boto3/resources/__pycache__/base.cpython-311.pyc,,
|
||||
boto3/resources/__pycache__/collection.cpython-311.pyc,,
|
||||
boto3/resources/__pycache__/factory.cpython-311.pyc,,
|
||||
boto3/resources/__pycache__/model.cpython-311.pyc,,
|
||||
boto3/resources/__pycache__/params.cpython-311.pyc,,
|
||||
boto3/resources/__pycache__/response.cpython-311.pyc,,
|
||||
boto3/resources/action.py,sha256=vPfVHVgXiGqhwpgRSCC7lSsY3vGjgsSiYhXa14CMAqw,9600
|
||||
boto3/resources/base.py,sha256=lkMPWTgSh9E1PRVtG-VwCresHbQ8-EVn9RqAqv0jnOE,5012
|
||||
boto3/resources/collection.py,sha256=aVifZoUVHUarGF9S4Ih8qBUfdqKKOBAEd0BaISKaLio,19113
|
||||
boto3/resources/factory.py,sha256=iXV5l7UZePNIfkkUMgUNC0tIdJhxr_65m9KYdwIOfKA,22708
|
||||
boto3/resources/model.py,sha256=kssQzwCclHMmFhl0hcAzhu15489D-IhWfl4OwJ74aIs,20336
|
||||
boto3/resources/params.py,sha256=i6KAjOzjzou7ouViYbRZCz0CwqB6fA_6gOJFDIruTV8,6112
|
||||
boto3/resources/response.py,sha256=aIATkyer_rl5qsp-OFCxe36whvY4JzjgNc9qN-vYMxg,11638
|
||||
boto3/s3/__init__.py,sha256=GkSq-WxXWfVHu1SEcMrlJbzkfw9ACgF3UdCL6fPpTmY,562
|
||||
boto3/s3/__pycache__/__init__.cpython-311.pyc,,
|
||||
boto3/s3/__pycache__/constants.cpython-311.pyc,,
|
||||
boto3/s3/__pycache__/inject.cpython-311.pyc,,
|
||||
boto3/s3/__pycache__/transfer.cpython-311.pyc,,
|
||||
boto3/s3/constants.py,sha256=ZaYknNwqGwsJEGkL92GXaBs9kjfRbyCDFt89wei8t7E,690
|
||||
boto3/s3/inject.py,sha256=roCNwtKHbxSwV6pZuevUJAyL09UKdeK-nVBeLltR7i8,30401
|
||||
boto3/s3/transfer.py,sha256=_0Xxoycr7CiUbLtSoPeRP8cjK7yYHnmDIm59CQPYDAs,15935
|
||||
boto3/session.py,sha256=mEk9ystkkF6UQYtGJQ9EJ5OL8PNbrbArVDYEnF1bX8w,21717
|
||||
boto3/utils.py,sha256=dBw0Eu23TOhDsP1Lkrp4uOVMn5DS8s0kRGwVRiCD_KM,3141
|
||||
@@ -0,0 +1,5 @@
|
||||
Wheel-Version: 1.0
|
||||
Generator: setuptools (75.1.0)
|
||||
Root-Is-Purelib: true
|
||||
Tag: py3-none-any
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
boto3
|
||||
@@ -0,0 +1,107 @@
|
||||
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
import logging
|
||||
from logging import NullHandler
|
||||
|
||||
from boto3.compat import _warn_deprecated_python
|
||||
from boto3.session import Session
|
||||
|
||||
__author__ = 'Amazon Web Services'
|
||||
__version__ = '1.39.17'
|
||||
|
||||
|
||||
# The default Boto3 session; autoloaded when needed.
|
||||
DEFAULT_SESSION = None
|
||||
|
||||
|
||||
def setup_default_session(**kwargs):
    """Create and install the module-level default session.

    All keyword arguments are forwarded verbatim to the
    :py:class:`~boto3.session.Session` constructor.  Calling this is only
    necessary when custom parameters are required; otherwise a default
    session is created lazily on first use.
    """
    global DEFAULT_SESSION
    DEFAULT_SESSION = Session(**kwargs)
|
||||
|
||||
|
||||
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
    """Attach a console ``StreamHandler`` to the named logger.

    By default this emits all boto3 messages to the console stream.

    >>> import boto3
    >>> boto3.set_stream_logger('boto3.resources', logging.INFO)

    For debugging purposes a good choice is to set the stream logger to
    ``''``, which is equivalent to saying "log everything".

    .. WARNING::
        Be aware that when logging anything from ``'botocore'`` the full wire
        trace will appear in your logs. If your payloads contain sensitive data
        this should not be used in production.

    :type name: string
    :param name: Log name
    :type level: int
    :param level: Logging level, e.g. ``logging.INFO``
    :type format_string: str
    :param format_string: Log message format
    """
    if format_string is None:
        format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"

    target_logger = logging.getLogger(name)
    target_logger.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(logging.Formatter(format_string))
    target_logger.addHandler(stream_handler)
|
||||
|
||||
|
||||
def _get_default_session():
    """Return the process-wide default session, creating it on first use.

    Also emits the Python deprecation warning (if any) on every access.

    :rtype: :py:class:`~boto3.session.Session`
    :return: The default session
    """
    if DEFAULT_SESSION is None:
        setup_default_session()
    _warn_deprecated_python()
    return DEFAULT_SESSION
|
||||
|
||||
|
||||
def client(*args, **kwargs):
    """Create a low-level service client via the default session.

    Equivalent to ``Session.client`` on the lazily-created default
    session; see :py:meth:`boto3.session.Session.client` for arguments.
    """
    session = _get_default_session()
    return session.client(*args, **kwargs)
|
||||
|
||||
|
||||
def resource(*args, **kwargs):
    """Create a resource service client via the default session.

    Equivalent to ``Session.resource`` on the lazily-created default
    session; see :py:meth:`boto3.session.Session.resource` for arguments.
    """
    session = _get_default_session()
    return session.resource(*args, **kwargs)
|
||||
|
||||
|
||||
# Set up do-nothing logging like a library is supposed to.
|
||||
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
|
||||
logging.getLogger('boto3').addHandler(NullHandler())
|
||||
@@ -0,0 +1,92 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
import sys
|
||||
import os
|
||||
import errno
|
||||
import socket
|
||||
import warnings
|
||||
|
||||
from boto3.exceptions import PythonDeprecationWarning
|
||||
|
||||
# In python3, socket.error is OSError, which is too general
|
||||
# for what we want (i.e FileNotFoundError is a subclass of OSError).
|
||||
# In py3 all the socket related errors are in a newly created
|
||||
# ConnectionError
|
||||
SOCKET_ERROR = ConnectionError
|
||||
|
||||
_APPEND_MODE_CHAR = 'a'
|
||||
|
||||
import collections.abc as collections_abc
|
||||
|
||||
|
||||
if sys.platform.startswith('win'):

    def rename_file(current_filename, new_filename):
        """Rename *current_filename* over *new_filename*, replacing it.

        Windows ``os.rename`` refuses to overwrite an existing
        destination, so it is removed up front.
        """
        try:
            os.remove(new_filename)
        except OSError as e:
            # A missing destination is the expected case; any other
            # removal failure must propagate to the caller.
            if e.errno != errno.ENOENT:
                raise
        os.rename(current_filename, new_filename)

else:
    # POSIX rename() already replaces the destination atomically.
    rename_file = os.rename
|
||||
|
||||
|
||||
def filter_python_deprecation_warnings():
    """Suppress boto3's Python-version deprecation warning.

    Invoking this filter acknowledges your runtime will soon be deprecated
    at which time you will stop receiving all updates to your client.
    """
    warnings.filterwarnings(
        action='ignore',
        message=".*Boto3 will no longer support Python.*",
        category=PythonDeprecationWarning,
        module=r".*boto3\.compat",
    )
|
||||
|
||||
|
||||
def _warn_deprecated_python():
|
||||
"""Use this template for future deprecation campaigns as needed."""
|
||||
py_37_params = {
|
||||
'date': 'December 13, 2023',
|
||||
'blog_link': (
|
||||
'https://aws.amazon.com/blogs/developer/'
|
||||
'python-support-policy-updates-for-aws-sdks-and-tools/'
|
||||
)
|
||||
}
|
||||
deprecated_versions = {
|
||||
# Example template for future deprecations
|
||||
(3, 7): py_37_params,
|
||||
}
|
||||
py_version = sys.version_info[:2]
|
||||
|
||||
if py_version in deprecated_versions:
|
||||
params = deprecated_versions[py_version]
|
||||
warning = (
|
||||
"Boto3 will no longer support Python {}.{} "
|
||||
"starting {}. To continue receiving service updates, "
|
||||
"bug fixes, and security updates please upgrade to Python 3.8 or "
|
||||
"later. More information can be found here: {}"
|
||||
).format(py_version[0], py_version[1], params['date'], params['blog_link'])
|
||||
warnings.warn(warning, PythonDeprecationWarning)
|
||||
|
||||
|
||||
def is_append_mode(fileobj):
    """Return True if *fileobj* exposes a string ``mode`` containing the
    append-mode character.

    Objects without a ``mode`` attribute, or with a non-string ``mode``,
    are never considered append-mode.
    """
    mode = getattr(fileobj, 'mode', None)
    return isinstance(mode, str) and _APPEND_MODE_CHAR in mode
|
||||
@@ -0,0 +1,167 @@
|
||||
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
"""
|
||||
This file contains private functionality for interacting with the AWS
|
||||
Common Runtime library (awscrt) in boto3.
|
||||
|
||||
All code contained within this file is for internal usage within this
|
||||
project and is not intended for external consumption. All interfaces
|
||||
contained within are subject to abrupt breaking changes.
|
||||
"""
|
||||
|
||||
import threading
|
||||
|
||||
import botocore.exceptions
|
||||
from botocore.session import Session
|
||||
from s3transfer.crt import (
|
||||
BotocoreCRTCredentialsWrapper,
|
||||
BotocoreCRTRequestSerializer,
|
||||
CRTTransferManager,
|
||||
acquire_crt_s3_process_lock,
|
||||
create_s3_crt_client,
|
||||
)
|
||||
|
||||
# Singletons for CRT-backed transfers
|
||||
CRT_S3_CLIENT = None
|
||||
BOTOCORE_CRT_SERIALIZER = None
|
||||
|
||||
CLIENT_CREATION_LOCK = threading.Lock()
|
||||
PROCESS_LOCK_NAME = 'boto3'
|
||||
|
||||
|
||||
def _create_crt_client(session, config, region_name, cred_provider):
    """Create a CRT S3 Client for file transfer.

    ``session`` and ``config`` are currently unused but kept for
    signature parity with the other factory helpers.  Instantiating many
    of these may lead to degraded performance or system resource
    exhaustion.
    """
    return create_s3_crt_client(
        region=region_name,
        use_ssl=True,
        crt_credentials_provider=cred_provider,
    )
|
||||
|
||||
|
||||
def _create_crt_request_serializer(session, region_name):
    """Build a botocore-backed CRT request serializer for *region_name*."""
    client_kwargs = {'region_name': region_name, 'endpoint_url': None}
    return BotocoreCRTRequestSerializer(session, client_kwargs)
|
||||
|
||||
|
||||
def _create_crt_s3_client(
    session, config, region_name, credentials, lock, **kwargs
):
    """Create the boto3 wrapper managing the CRT lock reference and S3 client.

    Wraps *credentials* in a CRT-compatible provider and returns a
    :class:`CRTS3Client` holding the new CRT client, the process lock,
    the region, and the credentials wrapper.
    """
    wrapper = BotocoreCRTCredentialsWrapper(credentials)
    provider = wrapper.to_crt_credentials_provider()
    crt_client = _create_crt_client(session, config, region_name, provider)
    return CRTS3Client(crt_client, lock, region_name, wrapper)
|
||||
|
||||
|
||||
def _initialize_crt_transfer_primatives(client, config):
    """Build the (request serializer, CRTS3Client) pair for CRT transfers.

    Returns ``(None, None)`` when the cross-process CRT lock cannot be
    acquired, signalling the caller to fall back to the classic
    s3transfer manager.
    """
    lock = acquire_crt_s3_process_lock(PROCESS_LOCK_NAME)
    if lock is None:
        # Another process owns the CRT; this process must use classic
        # s3transfer instead.
        return None, None

    session = Session()
    region_name = client.meta.region_name
    credentials = client._get_credentials()

    serializer = _create_crt_request_serializer(session, region_name)
    s3_client = _create_crt_s3_client(
        session, config, region_name, credentials, lock
    )
    return serializer, s3_client
|
||||
|
||||
|
||||
def get_crt_s3_client(client, config):
    """Return the process-wide CRT S3 client singleton, creating it lazily.

    Thread-safe via ``CLIENT_CREATION_LOCK``.  May return ``None`` when
    CRT initialization was not possible (e.g. the process lock is held
    elsewhere).
    """
    global CRT_S3_CLIENT
    global BOTOCORE_CRT_SERIALIZER

    with CLIENT_CREATION_LOCK:
        if CRT_S3_CLIENT is None:
            serializer, s3_client = _initialize_crt_transfer_primatives(
                client, config
            )
            BOTOCORE_CRT_SERIALIZER = serializer
            CRT_S3_CLIENT = s3_client
    return CRT_S3_CLIENT
|
||||
|
||||
|
||||
class CRTS3Client:
    """Bundle a CRT client with its process lock, region, and credentials.

    Due to limitations in the existing CRT interfaces, calls can only be
    made in a single region and redirects are not supported; the region
    is recorded so callers can detect requests the CRT client cannot
    successfully serve.
    """

    def __init__(self, crt_client, process_lock, region, cred_provider):
        # Underlying awscrt-backed S3 client used for transfers.
        self.crt_client = crt_client
        # Cross-process lock proving this process owns the CRT.
        self.process_lock = process_lock
        # Region the CRT client was instantiated for.
        self.region = region
        # Credentials wrapper used to build the CRT credentials provider.
        self.cred_provider = cred_provider
|
||||
|
||||
|
||||
def is_crt_compatible_request(client, crt_s3_client):
    """Return True when *client* can safely use the CRT singleton.

    The boto3 client must use the same signing region and credentials as
    the ``CRT_S3_CLIENT`` singleton; otherwise callers fall back to the
    classic transfer manager.
    """
    if crt_s3_client is None:
        return False

    creds = client._get_credentials()
    if creds is None:
        return False

    # Both checks are evaluated unconditionally: compare_identity resolves
    # the CRT provider's credentials, which we always want to exercise.
    same_identity = compare_identity(
        creds.get_frozen_credentials(), crt_s3_client.cred_provider
    )
    same_region = client.meta.region_name == crt_s3_client.region
    return same_region and same_identity
|
||||
|
||||
|
||||
def compare_identity(boto3_creds, crt_s3_creds):
    """Return True if frozen boto3 credentials match the CRT provider's.

    *crt_s3_creds* is a callable that resolves the CRT credentials; when
    it cannot produce credentials the identities are treated as
    different.
    """
    try:
        resolved = crt_s3_creds()
    except botocore.exceptions.NoCredentialsError:
        return False

    return (
        boto3_creds.access_key == resolved.access_key_id
        and boto3_creds.secret_key == resolved.secret_access_key
        and boto3_creds.token == resolved.session_token
    )
|
||||
|
||||
|
||||
def create_crt_transfer_manager(client, config):
    """Return a CRTTransferManager for optimized transfer, or None.

    ``None`` signals that the CRT path is unavailable for this client
    (incompatible region/credentials, or CRT not acquired by this
    process) and the classic manager should be used.
    """
    crt_s3_client = get_crt_s3_client(client, config)
    if not is_crt_compatible_request(client, crt_s3_client):
        return None
    return CRTTransferManager(
        crt_s3_client.crt_client, BOTOCORE_CRT_SERIALIZER
    )
|
||||
+195
@@ -0,0 +1,195 @@
|
||||
{
|
||||
"service": {
|
||||
"actions": {
|
||||
"CreateStack": {
|
||||
"request": { "operation": "CreateStack" },
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "requestParameter", "path": "StackName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Event": {
|
||||
"resource": {
|
||||
"type": "Event",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Stack": {
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Stacks": {
|
||||
"request": { "operation": "DescribeStacks" },
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "response", "path": "Stacks[].StackName" }
|
||||
],
|
||||
"path": "Stacks[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"Event": {
|
||||
"identifiers": [
|
||||
{
|
||||
"name": "Id",
|
||||
"memberName": "EventId"
|
||||
}
|
||||
],
|
||||
"shape": "StackEvent"
|
||||
},
|
||||
"Stack": {
|
||||
"identifiers": [
|
||||
{
|
||||
"name": "Name",
|
||||
"memberName": "StackName"
|
||||
}
|
||||
],
|
||||
"shape": "Stack",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeStacks",
|
||||
"params": [
|
||||
{ "target": "StackName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
},
|
||||
"path": "Stacks[0]"
|
||||
},
|
||||
"actions": {
|
||||
"CancelUpdate": {
|
||||
"request": {
|
||||
"operation": "CancelUpdateStack",
|
||||
"params": [
|
||||
{ "target": "StackName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteStack",
|
||||
"params": [
|
||||
{ "target": "StackName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Update": {
|
||||
"request": {
|
||||
"operation": "UpdateStack",
|
||||
"params": [
|
||||
{ "target": "StackName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Resource": {
|
||||
"resource": {
|
||||
"type": "StackResource",
|
||||
"identifiers": [
|
||||
{ "target": "StackName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "LogicalId", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Events": {
|
||||
"request": {
|
||||
"operation": "DescribeStackEvents",
|
||||
"params": [
|
||||
{ "target": "StackName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Event",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "StackEvents[].EventId" }
|
||||
],
|
||||
"path": "StackEvents[]"
|
||||
}
|
||||
},
|
||||
"ResourceSummaries": {
|
||||
"request": {
|
||||
"operation": "ListStackResources",
|
||||
"params": [
|
||||
{ "target": "StackName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "StackResourceSummary",
|
||||
"identifiers": [
|
||||
{ "target": "LogicalId", "source": "response", "path": "StackResourceSummaries[].LogicalResourceId" },
|
||||
{ "target": "StackName", "source": "requestParameter", "path": "StackName" }
|
||||
],
|
||||
"path": "StackResourceSummaries[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"StackResource": {
|
||||
"identifiers": [
|
||||
{ "name": "StackName" },
|
||||
{
|
||||
"name": "LogicalId",
|
||||
"memberName": "LogicalResourceId"
|
||||
}
|
||||
],
|
||||
"shape": "StackResourceDetail",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeStackResource",
|
||||
"params": [
|
||||
{ "target": "LogicalResourceId", "source": "identifier", "name": "LogicalId" },
|
||||
{ "target": "StackName", "source": "identifier", "name": "StackName" }
|
||||
]
|
||||
},
|
||||
"path": "StackResourceDetail"
|
||||
},
|
||||
"has": {
|
||||
"Stack": {
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "identifier", "name": "StackName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"StackResourceSummary": {
|
||||
"identifiers": [
|
||||
{ "name": "StackName" },
|
||||
{
|
||||
"name": "LogicalId",
|
||||
"memberName": "LogicalResourceId"
|
||||
}
|
||||
],
|
||||
"shape": "StackResourceSummary",
|
||||
"has": {
|
||||
"Resource": {
|
||||
"resource": {
|
||||
"type": "StackResource",
|
||||
"identifiers": [
|
||||
{ "target": "LogicalId", "source": "identifier", "name": "LogicalId" },
|
||||
{ "target": "StackName", "source": "identifier", "name": "StackName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
+334
@@ -0,0 +1,334 @@
|
||||
{
|
||||
"service": {
|
||||
"has": {
|
||||
"Alarm": {
|
||||
"resource": {
|
||||
"type": "Alarm",
|
||||
"identifiers": [
|
||||
{
|
||||
"target": "Name",
|
||||
"source": "input"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"Metric": {
|
||||
"resource": {
|
||||
"type": "Metric",
|
||||
"identifiers": [
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "input"
|
||||
},
|
||||
{
|
||||
"target": "Name",
|
||||
"source": "input"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Alarms": {
|
||||
"request": { "operation": "DescribeAlarms" },
|
||||
"resource": {
|
||||
"type": "Alarm",
|
||||
"identifiers": [
|
||||
{
|
||||
"target": "Name",
|
||||
"source": "response",
|
||||
"path": "MetricAlarms[].AlarmName"
|
||||
}
|
||||
],
|
||||
"path": "MetricAlarms[]"
|
||||
}
|
||||
},
|
||||
"Metrics": {
|
||||
"request": { "operation": "ListMetrics" },
|
||||
"resource": {
|
||||
"type": "Metric",
|
||||
"identifiers": [
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "response",
|
||||
"path": "Metrics[].Namespace"
|
||||
},
|
||||
{
|
||||
"target": "Name",
|
||||
"source": "response",
|
||||
"path": "Metrics[].MetricName"
|
||||
}
|
||||
],
|
||||
"path": "Metrics[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"Alarm": {
|
||||
"identifiers": [
|
||||
{
|
||||
"name": "Name",
|
||||
"memberName": "AlarmName"
|
||||
}
|
||||
],
|
||||
"shape": "MetricAlarm",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeAlarms",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmNames[0]",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
},
|
||||
"path": "MetricAlarms[0]"
|
||||
},
|
||||
"actions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteAlarms",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmNames[0]",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"DescribeHistory": {
|
||||
"request": {
|
||||
"operation": "DescribeAlarmHistory",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmName",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"DisableActions": {
|
||||
"request": {
|
||||
"operation": "DisableAlarmActions",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmNames[0]",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"EnableActions": {
|
||||
"request": {
|
||||
"operation": "EnableAlarmActions",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmNames[0]",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"SetState": {
|
||||
"request": {
|
||||
"operation": "SetAlarmState",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmName",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"batchActions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteAlarms",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmNames[]",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"DisableActions": {
|
||||
"request": {
|
||||
"operation": "DisableAlarmActions",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmNames[]",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"EnableActions": {
|
||||
"request": {
|
||||
"operation": "EnableAlarmActions",
|
||||
"params": [
|
||||
{
|
||||
"target": "AlarmNames[]",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Metric": {
|
||||
"resource": {
|
||||
"type": "Metric",
|
||||
"identifiers": [
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "data",
|
||||
"path": "Namespace"
|
||||
},
|
||||
{
|
||||
"target": "Name",
|
||||
"source": "data",
|
||||
"path": "MetricName"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Metric": {
|
||||
"identifiers": [
|
||||
{
|
||||
"name": "Namespace",
|
||||
"memberName": "Namespace"
|
||||
},
|
||||
{
|
||||
"name": "Name",
|
||||
"memberName": "MetricName"
|
||||
}
|
||||
],
|
||||
"shape": "Metric",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "ListMetrics",
|
||||
"params": [
|
||||
{
|
||||
"target": "MetricName",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
},
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "identifier",
|
||||
"name": "Namespace"
|
||||
}
|
||||
]
|
||||
},
|
||||
"path": "Metrics[0]"
|
||||
},
|
||||
"actions": {
|
||||
"GetStatistics": {
|
||||
"request": {
|
||||
"operation": "GetMetricStatistics",
|
||||
"params": [
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "identifier",
|
||||
"name": "Namespace"
|
||||
},
|
||||
{
|
||||
"target": "MetricName",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"PutAlarm": {
|
||||
"request": {
|
||||
"operation": "PutMetricAlarm",
|
||||
"params": [
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "identifier",
|
||||
"name": "Namespace"
|
||||
},
|
||||
{
|
||||
"target": "MetricName",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Alarm",
|
||||
"identifiers": [
|
||||
{
|
||||
"target": "Name",
|
||||
"source": "requestParameter",
|
||||
"path": "AlarmName"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"PutData": {
|
||||
"request": {
|
||||
"operation": "PutMetricData",
|
||||
"params": [
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "identifier",
|
||||
"name": "Namespace"
|
||||
},
|
||||
{
|
||||
"target": "MetricData[].MetricName",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Alarms": {
|
||||
"request": {
|
||||
"operation": "DescribeAlarmsForMetric",
|
||||
"params": [
|
||||
{
|
||||
"target": "Namespace",
|
||||
"source": "identifier",
|
||||
"name": "Namespace"
|
||||
},
|
||||
{
|
||||
"target": "MetricName",
|
||||
"source": "identifier",
|
||||
"name": "Name"
|
||||
}
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Alarm",
|
||||
"identifiers": [
|
||||
{
|
||||
"target": "Name",
|
||||
"source": "response",
|
||||
"path": "MetricAlarms[].AlarmName"
|
||||
}
|
||||
],
|
||||
"path": "MetricAlarms[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
+150
@@ -0,0 +1,150 @@
|
||||
{
|
||||
"service": {
|
||||
"actions": {
|
||||
"BatchGetItem": {
|
||||
"request": { "operation": "BatchGetItem" }
|
||||
},
|
||||
"BatchWriteItem": {
|
||||
"request": { "operation": "BatchWriteItem" }
|
||||
},
|
||||
"CreateTable": {
|
||||
"request": { "operation": "CreateTable" },
|
||||
"resource": {
|
||||
"type": "Table",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "response", "path": "TableDescription.TableName" }
|
||||
],
|
||||
"path": "TableDescription"
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Table": {
|
||||
"resource": {
|
||||
"type": "Table",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Tables": {
|
||||
"request": { "operation": "ListTables" },
|
||||
"resource": {
|
||||
"type": "Table",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "response", "path": "TableNames[]" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"Table": {
|
||||
"identifiers": [
|
||||
{
|
||||
"name": "Name",
|
||||
"memberName": "TableName"
|
||||
}
|
||||
],
|
||||
"shape": "TableDescription",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeTable",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
},
|
||||
"path": "Table"
|
||||
},
|
||||
"actions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteTable",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"DeleteItem": {
|
||||
"request": {
|
||||
"operation": "DeleteItem",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"GetItem": {
|
||||
"request": {
|
||||
"operation": "GetItem",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"PutItem": {
|
||||
"request": {
|
||||
"operation": "PutItem",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Query": {
|
||||
"request": {
|
||||
"operation": "Query",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Scan": {
|
||||
"request": {
|
||||
"operation": "Scan",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Update": {
|
||||
"request": {
|
||||
"operation": "UpdateTable",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Table",
|
||||
"identifiers": [
|
||||
{ "target": "Name", "source": "identifier", "name": "Name" }
|
||||
],
|
||||
"path": "TableDescription"
|
||||
}
|
||||
},
|
||||
"UpdateItem": {
|
||||
"request": {
|
||||
"operation": "UpdateItem",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"waiters":{
|
||||
"Exists": {
|
||||
"waiterName": "TableExists",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
},
|
||||
"NotExists": {
|
||||
"waiterName": "TableNotExists",
|
||||
"params": [
|
||||
{ "target": "TableName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
+2289
File diff suppressed because it is too large
Load Diff
+2289
File diff suppressed because it is too large
Load Diff
+2289
File diff suppressed because it is too large
Load Diff
+2567
File diff suppressed because it is too large
Load Diff
+2567
File diff suppressed because it is too large
Load Diff
+2567
File diff suppressed because it is too large
Load Diff
+2582
File diff suppressed because it is too large
Load Diff
+581
@@ -0,0 +1,581 @@
|
||||
{
|
||||
"service": {
|
||||
"actions": {
|
||||
"CreateVault": {
|
||||
"request": {
|
||||
"operation": "CreateVault",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "string", "value": "-" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "requestParameter", "path": "accountId" },
|
||||
{ "target": "Name", "source": "requestParameter", "path": "vaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Account": {
|
||||
"resource": {
|
||||
"type": "Account",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Vaults": {
|
||||
"request": {
|
||||
"operation": "ListVaults",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "string", "value": "-" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "requestParameter", "path": "accountId" },
|
||||
{ "target": "Name", "source": "response", "path": "VaultList[].VaultName" }
|
||||
],
|
||||
"path": "VaultList[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"Account": {
|
||||
"identifiers": [
|
||||
{ "name": "Id" }
|
||||
],
|
||||
"actions": {
|
||||
"CreateVault": {
|
||||
"request": {
|
||||
"operation": "CreateVault",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "Id" },
|
||||
{ "target": "Name", "source": "requestParameter", "path": "vaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Vault": {
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "Id" },
|
||||
{ "target": "Name", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Vaults": {
|
||||
"request": {
|
||||
"operation": "ListVaults",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "Id" },
|
||||
{ "target": "Name", "source": "response", "path": "VaultList[].VaultName" }
|
||||
],
|
||||
"path": "VaultList[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Archive": {
|
||||
"identifiers": [
|
||||
{ "name": "AccountId" },
|
||||
{ "name": "VaultName" },
|
||||
{ "name": "Id" }
|
||||
],
|
||||
"actions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteArchive",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "archiveId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"InitiateArchiveRetrieval": {
|
||||
"request": {
|
||||
"operation": "InitiateJob",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "jobParameters.Type", "source": "string", "value": "archive-retrieval" },
|
||||
{ "target": "jobParameters.ArchiveId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "jobId" },
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Vault": {
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "Name", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Job": {
|
||||
"identifiers": [
|
||||
{ "name": "AccountId" },
|
||||
{ "name": "VaultName" },
|
||||
{
|
||||
"name": "Id",
|
||||
"memberName": "JobId"
|
||||
}
|
||||
],
|
||||
"shape": "GlacierJobDescription",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeJob",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "jobId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"path": "@"
|
||||
},
|
||||
"actions": {
|
||||
"GetOutput": {
|
||||
"request": {
|
||||
"operation": "GetJobOutput",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "jobId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Vault": {
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "Name", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"MultipartUpload": {
|
||||
"identifiers": [
|
||||
{ "name": "AccountId" },
|
||||
{ "name": "VaultName" },
|
||||
{
|
||||
"name": "Id",
|
||||
"memberName": "MultipartUploadId"
|
||||
}
|
||||
],
|
||||
"shape": "UploadListElement",
|
||||
"actions": {
|
||||
"Abort": {
|
||||
"request": {
|
||||
"operation": "AbortMultipartUpload",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "uploadId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Complete": {
|
||||
"request": {
|
||||
"operation": "CompleteMultipartUpload",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "uploadId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Parts": {
|
||||
"request": {
|
||||
"operation": "ListParts",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "uploadId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"UploadPart": {
|
||||
"request": {
|
||||
"operation": "UploadMultipartPart",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" },
|
||||
{ "target": "uploadId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Vault": {
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "Name", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Notification": {
|
||||
"identifiers": [
|
||||
{ "name": "AccountId" },
|
||||
{ "name": "VaultName" }
|
||||
],
|
||||
"shape": "VaultNotificationConfig",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "GetVaultNotifications",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
},
|
||||
"path": "vaultNotificationConfig"
|
||||
},
|
||||
"actions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteVaultNotifications",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Set": {
|
||||
"request": {
|
||||
"operation": "SetVaultNotifications",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Vault": {
|
||||
"resource": {
|
||||
"type": "Vault",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "Name", "source": "identifier", "name": "VaultName" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Vault": {
|
||||
"identifiers": [
|
||||
{ "name": "AccountId" },
|
||||
{
|
||||
"name": "Name",
|
||||
"memberName": "VaultName"
|
||||
}
|
||||
],
|
||||
"shape": "DescribeVaultOutput",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeVault",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
},
|
||||
"path": "@"
|
||||
},
|
||||
"actions": {
|
||||
"Create": {
|
||||
"request": {
|
||||
"operation": "CreateVault",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteVault",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"InitiateInventoryRetrieval": {
|
||||
"request": {
|
||||
"operation": "InitiateJob",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "jobParameters.Type", "source": "string", "value": "inventory-retrieval" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "jobId" },
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"InitiateMultipartUpload": {
|
||||
"request": {
|
||||
"operation": "InitiateMultipartUpload",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "MultipartUpload",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "uploadId" },
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"UploadArchive": {
|
||||
"request": {
|
||||
"operation": "UploadArchive",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Archive",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "archiveId" },
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Account": {
|
||||
"resource": {
|
||||
"type": "Account",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Archive": {
|
||||
"resource": {
|
||||
"type": "Archive",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Job": {
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"MultipartUpload": {
|
||||
"resource": {
|
||||
"type": "MultipartUpload",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Notification": {
|
||||
"resource": {
|
||||
"type": "Notification",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"CompletedJobs": {
|
||||
"request": {
|
||||
"operation": "ListJobs",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "completed", "source": "string", "value": "true" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "response", "path": "JobList[].JobId" }
|
||||
],
|
||||
"path": "JobList[]"
|
||||
}
|
||||
},
|
||||
"FailedJobs": {
|
||||
"request": {
|
||||
"operation": "ListJobs",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "statuscode", "source": "string", "value": "Failed" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "response", "path": "JobList[].JobId" }
|
||||
],
|
||||
"path": "JobList[]"
|
||||
}
|
||||
},
|
||||
"Jobs": {
|
||||
"request": {
|
||||
"operation": "ListJobs",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "response", "path": "JobList[].JobId" }
|
||||
],
|
||||
"path": "JobList[]"
|
||||
}
|
||||
},
|
||||
"JobsInProgress": {
|
||||
"request": {
|
||||
"operation": "ListJobs",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "statuscode", "source": "string", "value": "InProgress" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "response", "path": "JobList[].JobId" }
|
||||
],
|
||||
"path": "JobList[]"
|
||||
}
|
||||
},
|
||||
"MultipartUplaods": {
|
||||
"request": {
|
||||
"operation": "ListMultipartUploads",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "MultipartUpload",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "response", "path": "UploadsList[].MultipartUploadId" }
|
||||
],
|
||||
"path": "UploadsList[]"
|
||||
}
|
||||
},
|
||||
"MultipartUploads": {
|
||||
"request": {
|
||||
"operation": "ListMultipartUploads",
|
||||
"params": [
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "MultipartUpload",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "response", "path": "UploadsList[].MultipartUploadId" }
|
||||
],
|
||||
"path": "UploadsList[]"
|
||||
}
|
||||
},
|
||||
"SucceededJobs": {
|
||||
"request": {
|
||||
"operation": "ListJobs",
|
||||
"params": [
|
||||
{ "target": "accountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "vaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "statuscode", "source": "string", "value": "Succeeded" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Job",
|
||||
"identifiers": [
|
||||
{ "target": "AccountId", "source": "identifier", "name": "AccountId" },
|
||||
{ "target": "VaultName", "source": "identifier", "name": "Name" },
|
||||
{ "target": "Id", "source": "response", "path": "JobList[].JobId" }
|
||||
],
|
||||
"path": "JobList[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
+1721
File diff suppressed because it is too large
Load Diff
+173
@@ -0,0 +1,173 @@
|
||||
{
|
||||
"service": {
|
||||
"actions": {
|
||||
"CreateStack": {
|
||||
"request": { "operation": "CreateStack" },
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "StackId" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Layer": {
|
||||
"resource": {
|
||||
"type": "Layer",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Stack": {
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Stacks": {
|
||||
"request": { "operation": "DescribeStacks" },
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "Stacks[].StackId" }
|
||||
],
|
||||
"path": "Stacks[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"Layer": {
|
||||
"identifiers": [
|
||||
{ "name": "Id" }
|
||||
],
|
||||
"shape": "Layer",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeLayers",
|
||||
"params": [
|
||||
{ "target": "LayerIds[]", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"path": "Layers[0]"
|
||||
},
|
||||
"actions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteLayer",
|
||||
"params": [
|
||||
{ "target": "LayerId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Stack": {
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "data", "path": "StackId" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Stack": {
|
||||
"identifiers": [
|
||||
{ "name": "Id" }
|
||||
],
|
||||
"shape": "Stack",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeStacks",
|
||||
"params": [
|
||||
{ "target": "StackIds[]", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"path": "Stacks[0]"
|
||||
},
|
||||
"actions": {
|
||||
"CreateLayer": {
|
||||
"request": {
|
||||
"operation": "CreateLayer",
|
||||
"params": [
|
||||
{ "target": "StackId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Layer",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "LayerId" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteStack",
|
||||
"params": [
|
||||
{ "target": "StackId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Summary": {
|
||||
"resource": {
|
||||
"type": "StackSummary",
|
||||
"identifiers": [
|
||||
{ "target": "StackId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Layers": {
|
||||
"request": {
|
||||
"operation": "DescribeLayers",
|
||||
"params": [
|
||||
{ "target": "StackId", "source": "identifier", "name": "Id" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Layer",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "response", "path": "Layers[].LayerId" }
|
||||
],
|
||||
"path": "Layers[]"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"StackSummary": {
|
||||
"identifiers": [
|
||||
{ "name": "StackId" }
|
||||
],
|
||||
"shape": "StackSummary",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "DescribeStackSummary",
|
||||
"params": [
|
||||
{ "target": "StackId", "source": "identifier", "name": "StackId" }
|
||||
]
|
||||
},
|
||||
"path": "StackSummary"
|
||||
},
|
||||
"has": {
|
||||
"Stack": {
|
||||
"resource": {
|
||||
"type": "Stack",
|
||||
"identifiers": [
|
||||
{ "target": "Id", "source": "identifier", "name": "StackId" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,327 @@
|
||||
{
|
||||
"service": {
|
||||
"actions": {
|
||||
"CreatePlatformApplication": {
|
||||
"request": { "operation": "CreatePlatformApplication" },
|
||||
"resource": {
|
||||
"type": "PlatformApplication",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "PlatformApplicationArn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"CreateTopic": {
|
||||
"request": { "operation": "CreateTopic" },
|
||||
"resource": {
|
||||
"type": "Topic",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "TopicArn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"PlatformApplication": {
|
||||
"resource": {
|
||||
"type": "PlatformApplication",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"PlatformEndpoint": {
|
||||
"resource": {
|
||||
"type": "PlatformEndpoint",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Subscription": {
|
||||
"resource": {
|
||||
"type": "Subscription",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "input" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Topic": {
|
||||
"resource": {
|
||||
"type": "Topic",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"PlatformApplications": {
|
||||
"request": { "operation": "ListPlatformApplications" },
|
||||
"resource": {
|
||||
"type": "PlatformApplication",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "PlatformApplications[].PlatformApplicationArn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Subscriptions": {
|
||||
"request": { "operation": "ListSubscriptions" },
|
||||
"resource": {
|
||||
"type": "Subscription",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "Subscriptions[].SubscriptionArn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Topics": {
|
||||
"request": { "operation": "ListTopics" },
|
||||
"resource": {
|
||||
"type": "Topic",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "Topics[].TopicArn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"PlatformApplication": {
|
||||
"identifiers": [
|
||||
{ "name": "Arn" }
|
||||
],
|
||||
"shape": "GetPlatformApplicationAttributesResponse",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "GetPlatformApplicationAttributes",
|
||||
"params": [
|
||||
{ "target": "PlatformApplicationArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"path": "@"
|
||||
},
|
||||
"actions": {
|
||||
"CreatePlatformEndpoint": {
|
||||
"request": {
|
||||
"operation": "CreatePlatformEndpoint",
|
||||
"params": [
|
||||
{ "target": "PlatformApplicationArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "PlatformEndpoint",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "EndpointArn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeletePlatformApplication",
|
||||
"params": [
|
||||
{ "target": "PlatformApplicationArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"SetAttributes": {
|
||||
"request": {
|
||||
"operation": "SetPlatformApplicationAttributes",
|
||||
"params": [
|
||||
{ "target": "PlatformApplicationArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Endpoints": {
|
||||
"request": {
|
||||
"operation": "ListEndpointsByPlatformApplication",
|
||||
"params": [
|
||||
{ "target": "PlatformApplicationArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "PlatformEndpoint",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "Endpoints[].EndpointArn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"PlatformEndpoint": {
|
||||
"identifiers": [
|
||||
{ "name": "Arn" }
|
||||
],
|
||||
"shape": "GetEndpointAttributesResponse",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "GetEndpointAttributes",
|
||||
"params": [
|
||||
{ "target": "EndpointArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"path": "@"
|
||||
},
|
||||
"actions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteEndpoint",
|
||||
"params": [
|
||||
{ "target": "EndpointArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Publish": {
|
||||
"request": {
|
||||
"operation": "Publish",
|
||||
"params": [
|
||||
{ "target": "TargetArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"SetAttributes": {
|
||||
"request": {
|
||||
"operation": "SetEndpointAttributes",
|
||||
"params": [
|
||||
{ "target": "EndpointArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Subscription": {
|
||||
"identifiers": [
|
||||
{ "name": "Arn" }
|
||||
],
|
||||
"shape": "GetSubscriptionAttributesResponse",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "GetSubscriptionAttributes",
|
||||
"params": [
|
||||
{ "target": "SubscriptionArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"path": "@"
|
||||
},
|
||||
"actions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "Unsubscribe",
|
||||
"params": [
|
||||
{ "target": "SubscriptionArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"SetAttributes": {
|
||||
"request": {
|
||||
"operation": "SetSubscriptionAttributes",
|
||||
"params": [
|
||||
{ "target": "SubscriptionArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Topic": {
|
||||
"identifiers": [
|
||||
{ "name": "Arn" }
|
||||
],
|
||||
"shape": "GetTopicAttributesResponse",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "GetTopicAttributes",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"path": "@"
|
||||
},
|
||||
"actions": {
|
||||
"AddPermission": {
|
||||
"request": {
|
||||
"operation": "AddPermission",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"ConfirmSubscription": {
|
||||
"request": {
|
||||
"operation": "ConfirmSubscription",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Subscription",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "SubscriptionArn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteTopic",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Publish": {
|
||||
"request": {
|
||||
"operation": "Publish",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"RemovePermission": {
|
||||
"request": {
|
||||
"operation": "RemovePermission",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"SetAttributes": {
|
||||
"request": {
|
||||
"operation": "SetTopicAttributes",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Subscribe": {
|
||||
"request": {
|
||||
"operation": "Subscribe",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Subscription",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "SubscriptionArn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Subscriptions": {
|
||||
"request": {
|
||||
"operation": "ListSubscriptionsByTopic",
|
||||
"params": [
|
||||
{ "target": "TopicArn", "source": "identifier", "name": "Arn" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Subscription",
|
||||
"identifiers": [
|
||||
{ "target": "Arn", "source": "response", "path": "Subscriptions[].SubscriptionArn" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,232 @@
|
||||
{
|
||||
"service": {
|
||||
"actions": {
|
||||
"CreateQueue": {
|
||||
"request": { "operation": "CreateQueue" },
|
||||
"resource": {
|
||||
"type": "Queue",
|
||||
"identifiers": [
|
||||
{ "target": "Url", "source": "response", "path": "QueueUrl" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"GetQueueByName": {
|
||||
"request": { "operation": "GetQueueUrl" },
|
||||
"resource": {
|
||||
"type": "Queue",
|
||||
"identifiers": [
|
||||
{ "target": "Url", "source": "response", "path": "QueueUrl" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Queue": {
|
||||
"resource": {
|
||||
"type": "Queue",
|
||||
"identifiers": [
|
||||
{ "target": "Url", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"Queues": {
|
||||
"request": { "operation": "ListQueues" },
|
||||
"resource": {
|
||||
"type": "Queue",
|
||||
"identifiers": [
|
||||
{ "target": "Url", "source": "response", "path": "QueueUrls[]" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"Message": {
|
||||
"identifiers": [
|
||||
{ "name": "QueueUrl" },
|
||||
{
|
||||
"name": "ReceiptHandle",
|
||||
"memberName": "ReceiptHandle"
|
||||
}
|
||||
],
|
||||
"shape": "Message",
|
||||
"actions": {
|
||||
"ChangeVisibility": {
|
||||
"request": {
|
||||
"operation": "ChangeMessageVisibility",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "QueueUrl" },
|
||||
{ "target": "ReceiptHandle", "source": "identifier", "name": "ReceiptHandle" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteMessage",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "QueueUrl" },
|
||||
{ "target": "ReceiptHandle", "source": "identifier", "name": "ReceiptHandle" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"batchActions": {
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteMessageBatch",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "QueueUrl" },
|
||||
{ "target": "Entries[*].Id", "source": "data", "path": "MessageId" },
|
||||
{ "target": "Entries[*].ReceiptHandle", "source": "identifier", "name": "ReceiptHandle" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Queue": {
|
||||
"resource": {
|
||||
"type": "Queue",
|
||||
"identifiers": [
|
||||
{ "target": "Url", "source": "identifier", "name": "QueueUrl" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"Queue": {
|
||||
"identifiers": [
|
||||
{ "name": "Url" }
|
||||
],
|
||||
"shape": "GetQueueAttributesResult",
|
||||
"load": {
|
||||
"request": {
|
||||
"operation": "GetQueueAttributes",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" },
|
||||
{ "target": "AttributeNames[]", "source": "string", "value": "All" }
|
||||
]
|
||||
},
|
||||
"path": "@"
|
||||
},
|
||||
"actions": {
|
||||
"AddPermission": {
|
||||
"request": {
|
||||
"operation": "AddPermission",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"ChangeMessageVisibilityBatch": {
|
||||
"request": {
|
||||
"operation": "ChangeMessageVisibilityBatch",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Delete": {
|
||||
"request": {
|
||||
"operation": "DeleteQueue",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"DeleteMessages": {
|
||||
"request": {
|
||||
"operation": "DeleteMessageBatch",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"Purge": {
|
||||
"request": {
|
||||
"operation": "PurgeQueue",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"ReceiveMessages": {
|
||||
"request": {
|
||||
"operation": "ReceiveMessage",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Message",
|
||||
"identifiers": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" },
|
||||
{ "target": "ReceiptHandle", "source": "response", "path": "Messages[].ReceiptHandle" }
|
||||
],
|
||||
"path": "Messages[]"
|
||||
}
|
||||
},
|
||||
"RemovePermission": {
|
||||
"request": {
|
||||
"operation": "RemovePermission",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"SendMessage": {
|
||||
"request": {
|
||||
"operation": "SendMessage",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"SendMessages": {
|
||||
"request": {
|
||||
"operation": "SendMessageBatch",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"SetAttributes": {
|
||||
"request": {
|
||||
"operation": "SetQueueAttributes",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"has": {
|
||||
"Message": {
|
||||
"resource": {
|
||||
"type": "Message",
|
||||
"identifiers": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" },
|
||||
{ "target": "ReceiptHandle", "source": "input" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"hasMany": {
|
||||
"DeadLetterSourceQueues": {
|
||||
"request": {
|
||||
"operation": "ListDeadLetterSourceQueues",
|
||||
"params": [
|
||||
{ "target": "QueueUrl", "source": "identifier", "name": "Url" }
|
||||
]
|
||||
},
|
||||
"resource": {
|
||||
"type": "Queue",
|
||||
"identifiers": [
|
||||
{ "target": "Url", "source": "response", "path": "queueUrls[]" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,51 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
import os
|
||||
|
||||
from botocore.docs import DEPRECATED_SERVICE_NAMES
|
||||
|
||||
from boto3.docs.service import ServiceDocumenter
|
||||
|
||||
|
||||
def generate_docs(root_dir, session):
    """Generates the reference documentation for boto3.

    This will go through every available AWS service and output ReSTructured
    text files documenting each service.

    :param root_dir: The directory to write the reference files to. Each
        service's reference documentation is located at
        root_dir/reference/services/service-name.rst

    :param session: The boto3 session
    """
    services_doc_path = os.path.join(root_dir, 'reference', 'services')
    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(services_doc_path, exist_ok=True)

    # Prevents deprecated service names from being generated in docs.
    available_services = [
        service
        for service in session.get_available_services()
        if service not in DEPRECATED_SERVICE_NAMES
    ]

    for service_name in available_services:
        docs = ServiceDocumenter(
            service_name, session, services_doc_path
        ).document_service()
        service_doc_path = os.path.join(
            services_doc_path, service_name + '.rst'
        )
        # document_service() returns encoded bytes, hence binary mode.
        with open(service_doc_path, 'wb') as f:
            f.write(docs)
|
||||
@@ -0,0 +1,214 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
import os
|
||||
|
||||
from botocore import xform_name
|
||||
from botocore.docs.bcdoc.restdoc import DocumentStructure
|
||||
from botocore.docs.method import (
|
||||
document_custom_method,
|
||||
document_model_driven_method,
|
||||
)
|
||||
from botocore.model import OperationModel
|
||||
from botocore.utils import get_service_module_name
|
||||
|
||||
from boto3.docs.base import NestedDocumenter
|
||||
from boto3.docs.method import document_model_driven_resource_method
|
||||
from boto3.docs.utils import (
|
||||
add_resource_type_overview,
|
||||
get_resource_ignore_params,
|
||||
get_resource_public_actions,
|
||||
)
|
||||
|
||||
PUT_DATA_WARNING_MESSAGE = """
|
||||
.. warning::
|
||||
It is recommended to use the :py:meth:`put_metric_data`
|
||||
:doc:`client method <../../cloudwatch/client/put_metric_data>`
|
||||
instead. If you would still like to use this resource method,
|
||||
please make sure that ``MetricData[].MetricName`` is equal to
|
||||
the metric resource's ``name`` attribute.
|
||||
"""
|
||||
|
||||
WARNING_MESSAGES = {
|
||||
"Metric": {"put_data": PUT_DATA_WARNING_MESSAGE},
|
||||
}
|
||||
|
||||
IGNORE_PARAMS = {"Metric": {"put_data": ["Namespace"]}}
|
||||
|
||||
|
||||
class ActionDocumenter(NestedDocumenter):
    """Documents every public action exposed by a resource class."""

    def document_actions(self, section):
        # Map action name -> model for quick lookup when rendering below.
        modeled_actions = {
            action.name: action for action in self._resource_model.actions
        }
        resource_actions = get_resource_public_actions(
            self._resource.__class__
        )
        self.member_map['actions'] = sorted(resource_actions)
        add_resource_type_overview(
            section=section,
            resource_type='Actions',
            description=(
                'Actions call operations on resources. They may '
                'automatically handle the passing in of arguments set '
                'from identifiers and some attributes.'
            ),
            intro_link='actions_intro',
        )
        resource_warnings = WARNING_MESSAGES.get(self._resource_name, {})
        for action_name in sorted(resource_actions):
            # Each action is rendered into its own standalone document.
            action_doc = DocumentStructure(action_name, target='html')
            breadcrumb_section = action_doc.add_new_section('breadcrumb')
            breadcrumb_section.style.ref(self._resource_class_name, 'index')
            breadcrumb_section.write(f' / Action / {action_name}')
            action_doc.add_title_section(action_name)
            warning_message = resource_warnings.get(action_name)
            if warning_message is not None:
                action_doc.add_new_section("warning").write(warning_message)
            action_section = action_doc.add_new_section(
                action_name,
                context={'qualifier': f'{self.class_name}.'},
            )
            if action_name in ('load', 'reload') and self._resource_model.load:
                document_load_reload_action(
                    section=action_section,
                    action_name=action_name,
                    resource_name=self._resource_name,
                    event_emitter=self._resource.meta.client.meta.events,
                    load_model=self._resource_model.load,
                    service_model=self._service_model,
                )
            elif action_name in modeled_actions:
                document_action(
                    section=action_section,
                    resource_name=self._resource_name,
                    event_emitter=self._resource.meta.client.meta.events,
                    action_model=modeled_actions[action_name],
                    service_model=self._service_model,
                )
            else:
                document_custom_method(
                    action_section, action_name, resource_actions[action_name]
                )
            # Write actions in individual/nested files.
            # Path: <root>/reference/services/<service>/<resource_name>/<action_name>.rst
            actions_dir_path = os.path.join(
                self._root_docs_path,
                f'{self._service_name}',
                f'{self._resource_sub_path}',
            )
            action_doc.write_to_file(actions_dir_path, action_name)
|
||||
|
||||
|
||||
def document_action(
    section,
    resource_name,
    event_emitter,
    action_model,
    service_model,
    include_signature=True,
):
    """Documents a resource action

    :param section: The section to write to

    :param resource_name: The name of the resource

    :param event_emitter: The event emitter to use to emit events

    :param action_model: The model of the action

    :param service_model: The model of the service

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    operation_model = service_model.operation_model(
        action_model.request.operation
    )
    # Per-action overrides (IGNORE_PARAMS) take precedence over the params
    # that are auto-filled from the resource's identifiers.
    default_ignore = get_resource_ignore_params(action_model.request.params)
    ignore_params = IGNORE_PARAMS.get(resource_name, {}).get(
        action_model.name, default_ignore
    )
    example_return_value = (
        xform_name(action_model.resource.type)
        if action_model.resource
        else 'response'
    )
    # A service-level resource keeps its original (service) name in examples.
    example_resource_name = (
        resource_name
        if service_model.service_name == resource_name
        else xform_name(resource_name)
    )
    example_prefix = (
        f'{example_return_value} = {example_resource_name}.{action_model.name}'
    )
    full_action_name = (
        f"{section.context.get('qualifier', '')}{action_model.name}"
    )
    document_model_driven_resource_method(
        section=section,
        method_name=full_action_name,
        operation_model=operation_model,
        event_emitter=event_emitter,
        method_description=operation_model.documentation,
        example_prefix=example_prefix,
        exclude_input=ignore_params,
        resource_action_model=action_model,
        include_signature=include_signature,
    )
|
||||
|
||||
|
||||
def document_load_reload_action(
    section,
    action_name,
    resource_name,
    event_emitter,
    load_model,
    service_model,
    include_signature=True,
):
    """Documents the resource load action

    :param section: The section to write to

    :param action_name: The name of the loading action should be load or reload

    :param resource_name: The name of the resource

    :param event_emitter: The event emitter to use to emit events

    :param load_model: The model of the load action

    :param service_model: The model of the service

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    client_method = xform_name(load_model.request.operation)
    description = (
        f'Calls :py:meth:`{get_service_module_name(service_model)}.Client.'
        f'{client_method}` to update the attributes of the '
        f'{resource_name} resource. Note that the load and reload methods are '
        'the same method and can be used interchangeably.'
    )
    # A service-level resource keeps its original (service) name in examples.
    example_resource_name = (
        resource_name
        if service_model.service_name == resource_name
        else xform_name(resource_name)
    )
    example_prefix = f'{example_resource_name}.{action_name}'
    full_action_name = f"{section.context.get('qualifier', '')}{action_name}"
    # load/reload has no modeled input or output of its own, so document it
    # against an empty operation model.
    document_model_driven_method(
        section=section,
        method_name=full_action_name,
        operation_model=OperationModel({}, service_model),
        event_emitter=event_emitter,
        method_description=description,
        example_prefix=example_prefix,
        include_signature=include_signature,
    )
|
||||
@@ -0,0 +1,72 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
from botocore.docs.params import ResponseParamsDocumenter
|
||||
|
||||
from boto3.docs.utils import get_identifier_description
|
||||
|
||||
|
||||
class ResourceShapeDocumenter(ResponseParamsDocumenter):
    """Documents the members of a resource's backing shape."""

    # Event name emitted while documenting resource shape params.
    EVENT_NAME = 'resource-shape'
|
||||
|
||||
|
||||
def document_attribute(
    section,
    service_name,
    resource_name,
    attr_name,
    event_emitter,
    attr_model,
    include_signature=True,
):
    """Documents a single resource attribute backed by the resource's shape.

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    if include_signature:
        full_attr_name = f"{section.context.get('qualifier', '')}{attr_name}"
        section.style.start_sphinx_py_attr(full_attr_name)
    # Note that an attribute may have one, may have many, or may have no
    # operations that back the resource's shape. So we just set the
    # operation_name to the resource name if we ever to hook in and modify
    # a particular attribute.
    shape_documenter = ResourceShapeDocumenter(
        service_name=service_name,
        operation_name=resource_name,
        event_emitter=event_emitter,
    )
    shape_documenter.document_params(section=section, shape=attr_model)
|
||||
|
||||
|
||||
def document_identifier(
    section,
    resource_name,
    identifier_model,
    include_signature=True,
):
    """Documents a resource identifier as a string attribute.

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    qualifier = section.context.get('qualifier', '')
    if include_signature:
        section.style.start_sphinx_py_attr(
            f'{qualifier}{identifier_model.name}'
        )
    description = get_identifier_description(
        resource_name, identifier_model.name
    )
    # Identifiers are always strings in the resource model.
    section.write(f'*(string)* {description}')
|
||||
|
||||
|
||||
def document_reference(section, reference_model, include_signature=True):
    """Documents a reference attribute pointing at a related resource.

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    if include_signature:
        qualifier = section.context.get('qualifier', '')
        section.style.start_sphinx_py_attr(
            f'{qualifier}{reference_model.name}'
        )
    section.write(f'(:py:class:`{reference_model.resource.type}`) ')
    section.include_doc_string(
        f'The related {reference_model.name} if set, otherwise ``None``.'
    )
|
||||
@@ -0,0 +1,51 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
from botocore.compat import OrderedDict
|
||||
|
||||
|
||||
class BaseDocumenter:
    """Holds the shared state needed to document a boto3 resource."""

    def __init__(self, resource):
        self._resource = resource
        self._client = resource.meta.client
        self._resource_model = resource.meta.resource_model
        self._service_model = self._client.meta.service_model
        self._resource_name = self._resource_model.name
        self._service_name = self._service_model.service_name
        self._service_docs_name = self._client.__class__.__name__
        self.member_map = OrderedDict()
        # A "service resource" shares its name with the service itself.
        self.represents_service_resource = (
            self._service_name == self._resource_name
        )
        self._resource_class_name = (
            'ServiceResource'
            if self.represents_service_resource
            else self._resource_name
        )

    @property
    def class_name(self):
        """Fully qualified class name used in the generated docs."""
        return f'{self._service_docs_name}.{self._resource_name}'
|
||||
|
||||
|
||||
class NestedDocumenter(BaseDocumenter):
    """Base for documenters that write each member into its own nested file."""

    def __init__(self, resource, root_docs_path):
        super().__init__(resource)
        self._root_docs_path = root_docs_path
        # Service resources live under a fixed directory name.
        self._resource_sub_path = (
            'service-resource'
            if self._resource_name == self._service_name
            else self._resource_name.lower()
        )

    @property
    def class_name(self):
        """Fully qualified class name used in the generated docs."""
        resource_class_name = (
            'ServiceResource'
            if self._resource_name == self._service_name
            else self._resource_name
        )
        return f'{self._service_docs_name}.{resource_class_name}'
|
||||
@@ -0,0 +1,24 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
from botocore.docs.client import ClientDocumenter
|
||||
|
||||
|
||||
class Boto3ClientDocumenter(ClientDocumenter):
    """Client documenter that shows the boto3 (not botocore) creation idiom."""

    def _add_client_creation_example(self, section):
        # Renders:
        #   import boto3
        #   client = boto3.client('<service>')
        section.style.start_codeblock()
        section.style.new_line()
        section.write('import boto3')
        section.style.new_line()
        section.style.new_line()
        section.write(f"client = boto3.client('{self._service_name}')")
        section.style.end_codeblock()
|
||||
@@ -0,0 +1,290 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
import os
|
||||
|
||||
from botocore import xform_name
|
||||
from botocore.docs.bcdoc.restdoc import DocumentStructure
|
||||
from botocore.docs.method import get_instance_public_methods
|
||||
from botocore.docs.utils import DocumentedShape
|
||||
|
||||
from boto3.docs.base import NestedDocumenter
|
||||
from boto3.docs.method import document_model_driven_resource_method
|
||||
from boto3.docs.utils import (
|
||||
add_resource_type_overview,
|
||||
get_resource_ignore_params,
|
||||
)
|
||||
|
||||
|
||||
class CollectionDocumenter(NestedDocumenter):
    """Documents every collection exposed by a resource class."""

    def document_collections(self, section):
        collections = self._resource.meta.resource_model.collections
        collections_list = []
        add_resource_type_overview(
            section=section,
            resource_type='Collections',
            description=(
                'Collections provide an interface to iterate over and '
                'manipulate groups of resources. '
            ),
            intro_link='guide_collections',
        )
        self.member_map['collections'] = collections_list
        for collection in collections:
            collections_list.append(collection.name)
            # Each collection is rendered into its own standalone document.
            collection_doc = DocumentStructure(collection.name, target='html')
            breadcrumb_section = collection_doc.add_new_section('breadcrumb')
            breadcrumb_section.style.ref(self._resource_class_name, 'index')
            breadcrumb_section.write(f' / Collection / {collection.name}')
            collection_doc.add_title_section(collection.name)
            collection_section = collection_doc.add_new_section(
                collection.name,
                context={'qualifier': f'{self.class_name}.'},
            )
            self._document_collection(collection_section, collection)

            # Write collections in individual/nested files.
            # Path: <root>/reference/services/<service>/<resource_name>/<collection_name>.rst
            collections_dir_path = os.path.join(
                self._root_docs_path,
                f'{self._service_name}',
                f'{self._resource_sub_path}',
            )
            collection_doc.write_to_file(
                collections_dir_path, collection.name
            )

    def _document_collection(self, section, collection):
        """Document one collection: its object plus each public method."""
        methods = get_instance_public_methods(
            getattr(self._resource, collection.name)
        )
        document_collection_object(section, collection)
        # Map batch-action name -> model so we can tell batch actions apart
        # from plain collection methods (all/filter/limit/page_size).
        batch_actions = {
            action.name: action for action in collection.batch_actions
        }

        client_meta = self._resource.meta.client.meta
        for method in sorted(methods):
            method_section = section.add_new_section(method)
            if method in batch_actions:
                document_batch_action(
                    section=method_section,
                    resource_name=self._resource_name,
                    event_emitter=client_meta.events,
                    batch_action_model=batch_actions[method],
                    collection_model=collection,
                    service_model=client_meta.service_model,
                )
            else:
                document_collection_method(
                    section=method_section,
                    resource_name=self._resource_name,
                    action_name=method,
                    event_emitter=client_meta.events,
                    collection_model=collection,
                    service_model=client_meta.service_model,
                )
|
||||
|
||||
|
||||
def document_collection_object(
    section,
    collection_model,
    include_signature=True,
):
    """Documents a collection resource object

    :param section: The section to write to

    :param collection_model: The model of the collection

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    resource_type = collection_model.resource.type
    if include_signature:
        qualifier = section.context.get('qualifier', '')
        section.style.start_sphinx_py_attr(
            f'{qualifier}{collection_model.name}'
        )
    section.include_doc_string(f'A collection of {resource_type} resources.')
    section.include_doc_string(
        f'A {resource_type} Collection will include all '
        f'resources by default, and extreme caution should be taken when '
        f'performing actions on all resources.'
    )
|
||||
|
||||
|
||||
def document_batch_action(
    section,
    resource_name,
    event_emitter,
    batch_action_model,
    service_model,
    collection_model,
    include_signature=True,
):
    """Documents a collection's batch action

    :param section: The section to write to

    :param resource_name: The name of the resource

    :param event_emitter: The event emitter to use to emit events

    :param batch_action_model: The model of the batch action

    :param service_model: The model of the service

    :param collection_model: The model of the collection

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    operation_model = service_model.operation_model(
        batch_action_model.request.operation
    )
    # Params that are auto-filled from resource identifiers are excluded
    # from the generated documentation.
    ignore_params = get_resource_ignore_params(
        batch_action_model.request.params
    )

    example_return_value = 'response'
    if batch_action_model.resource:
        example_return_value = xform_name(batch_action_model.resource.type)

    # A service-level resource keeps its original (service) name in examples.
    example_resource_name = xform_name(resource_name)
    if service_model.service_name == resource_name:
        example_resource_name = resource_name
    example_prefix = f'{example_return_value} = {example_resource_name}.{collection_model.name}.{batch_action_model.name}'
    document_model_driven_resource_method(
        section=section,
        method_name=batch_action_model.name,
        operation_model=operation_model,
        event_emitter=event_emitter,
        method_description=operation_model.documentation,
        example_prefix=example_prefix,
        exclude_input=ignore_params,
        resource_action_model=batch_action_model,
        include_signature=include_signature,
    )
|
||||
|
||||
|
||||
def document_collection_method(
    section,
    resource_name,
    action_name,
    event_emitter,
    collection_model,
    service_model,
    include_signature=True,
):
    """Documents a collection method

    :param section: The section to write to

    :param resource_name: The name of the resource

    :param action_name: The name of collection action. Currently only
        can be all, filter, limit, or page_size

    :param event_emitter: The event emitter to use to emit events

    :param collection_model: The model of the collection

    :param service_model: The model of the service

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    operation_model = service_model.operation_model(
        collection_model.request.operation
    )

    # Input members of the underlying operation are excluded for the
    # methods that do not forward kwargs to the operation.
    operation_input_members = []
    if operation_model.input_shape:
        operation_input_members = operation_model.input_shape.members

    # A service-level resource keeps its original (service) name in examples.
    example_resource_name = (
        resource_name
        if service_model.service_name == resource_name
        else xform_name(resource_name)
    )
    resource_type = collection_model.resource.type
    # All example prefixes share the same iterator assignment form.
    example_base = (
        f'{xform_name(resource_type)}_iterator = '
        f'{example_resource_name}.{collection_model.name}'
    )

    action_info_by_name = {
        'all': {
            'method_description': (
                f'Creates an iterable of all {resource_type} '
                f'resources in the collection.'
            ),
            'example_prefix': f'{example_base}.all',
            'exclude_input': operation_input_members,
        },
        'filter': {
            'method_description': (
                f'Creates an iterable of all {resource_type} '
                f'resources in the collection filtered by kwargs passed to '
                f'method. A {resource_type} collection will '
                f'include all resources by default if no filters are provided, '
                f'and extreme caution should be taken when performing actions '
                f'on all resources.'
            ),
            'example_prefix': f'{example_base}.filter',
            'exclude_input': get_resource_ignore_params(
                collection_model.request.params
            ),
        },
        'limit': {
            'method_description': (
                f'Creates an iterable up to a specified amount of '
                f'{resource_type} resources in the collection.'
            ),
            'example_prefix': f'{example_base}.limit',
            'include_input': [
                DocumentedShape(
                    name='count',
                    type_name='integer',
                    documentation=(
                        'The limit to the number of resources in the iterable.'
                    ),
                )
            ],
            'exclude_input': operation_input_members,
        },
        'page_size': {
            'method_description': (
                f'Creates an iterable of all {resource_type} '
                f'resources in the collection, but limits the number of '
                f'items returned by each service call by the specified amount.'
            ),
            'example_prefix': f'{example_base}.page_size',
            'include_input': [
                DocumentedShape(
                    name='count',
                    type_name='integer',
                    documentation=(
                        'The number of items returned by each service call'
                    ),
                )
            ],
            'exclude_input': operation_input_members,
        },
    }
    action_info = action_info_by_name.get(action_name)
    # Unknown action names are silently skipped, matching the modeled set.
    if action_info is not None:
        document_model_driven_resource_method(
            section=section,
            method_name=action_name,
            operation_model=operation_model,
            event_emitter=event_emitter,
            resource_action_model=collection_model,
            include_signature=include_signature,
            **action_info,
        )
|
||||
@@ -0,0 +1,77 @@
|
||||
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
||||
# may not use this file except in compliance with the License. A copy of
|
||||
# the License is located at
|
||||
#
|
||||
# https://aws.amazon.com/apache2.0/
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is
|
||||
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
||||
# ANY KIND, either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
from botocore.docs.docstring import LazyLoadedDocstring
|
||||
|
||||
from boto3.docs.action import document_action, document_load_reload_action
|
||||
from boto3.docs.attr import (
|
||||
document_attribute,
|
||||
document_identifier,
|
||||
document_reference,
|
||||
)
|
||||
from boto3.docs.collection import (
|
||||
document_batch_action,
|
||||
document_collection_method,
|
||||
document_collection_object,
|
||||
)
|
||||
from boto3.docs.subresource import document_sub_resource
|
||||
from boto3.docs.waiter import document_resource_waiter
|
||||
|
||||
|
||||
class ActionDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a resource action.

    Delegates the actual rendering to ``document_action``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_action(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class LoadReloadDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a resource load/reload action.

    Delegates the actual rendering to ``document_load_reload_action``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_load_reload_action(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class SubResourceDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a sub-resource.

    Delegates the actual rendering to ``document_sub_resource``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_sub_resource(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class AttributeDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a resource attribute.

    Delegates the actual rendering to ``document_attribute``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_attribute(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class IdentifierDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a resource identifier.

    Delegates the actual rendering to ``document_identifier``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_identifier(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class ReferenceDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a resource reference.

    Delegates the actual rendering to ``document_reference``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_reference(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class CollectionDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a collection object.

    Delegates the actual rendering to ``document_collection_object``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_collection_object(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class CollectionMethodDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a collection method.

    Delegates the actual rendering to ``document_collection_method``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_collection_method(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class BatchActionDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a collection batch action.

    Delegates the actual rendering to ``document_batch_action``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_batch_action(*doc_args, **doc_kwargs)
|
||||
|
||||
|
||||
class ResourceWaiterDocstring(LazyLoadedDocstring):
    """Lazily generated docstring for a resource waiter.

    Delegates the actual rendering to ``document_resource_waiter``.
    """

    def _write_docstring(self, *doc_args, **doc_kwargs):
        document_resource_waiter(*doc_args, **doc_kwargs)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user