Updated script so it can be controlled by a Node.js web app

mac OS
2024-11-25 12:24:18 +07:00
parent c440eda1f4
commit 8b0ab2bd3a
8662 changed files with 1803808 additions and 34 deletions

View File

@@ -0,0 +1,79 @@
import os
import sys
import warnings
from eventlet import convenience
from eventlet import event
from eventlet import greenpool
from eventlet import greenthread
from eventlet import patcher
from eventlet import queue
from eventlet import semaphore
from eventlet import support
from eventlet import timeout
# NOTE(hberaud): Versions are now managed by hatch and version control.
# hatch has a build hook which generates the version file, however,
# if the project is installed in editable mode then the _version.py file
# will not be updated unless the package is reinstalled (or locally rebuilt).
# For further details, please read:
# https://github.com/ofek/hatch-vcs#build-hook
# https://github.com/maresb/hatch-vcs-footgun-example
try:
from eventlet._version import __version__
except ImportError:
__version__ = "0.0.0"
import greenlet
# Force monotonic library search as early as possible.
# Helpful when CPython < 3.5 on Linux would block in `os.waitpid(-1)` before first use of the hub.
# Example: gunicorn
# https://github.com/eventlet/eventlet/issues/401#issuecomment-327500352
try:
import monotonic
del monotonic
except ImportError:
pass
connect = convenience.connect
listen = convenience.listen
serve = convenience.serve
StopServe = convenience.StopServe
wrap_ssl = convenience.wrap_ssl
Event = event.Event
GreenPool = greenpool.GreenPool
GreenPile = greenpool.GreenPile
sleep = greenthread.sleep
spawn = greenthread.spawn
spawn_n = greenthread.spawn_n
spawn_after = greenthread.spawn_after
kill = greenthread.kill
import_patched = patcher.import_patched
monkey_patch = patcher.monkey_patch
Queue = queue.Queue
Semaphore = semaphore.Semaphore
CappedSemaphore = semaphore.CappedSemaphore
BoundedSemaphore = semaphore.BoundedSemaphore
Timeout = timeout.Timeout
with_timeout = timeout.with_timeout
wrap_is_timeout = timeout.wrap_is_timeout
is_timeout = timeout.is_timeout
getcurrent = greenlet.greenlet.getcurrent
# deprecated
TimeoutError, exc_after, call_after_global = (
support.wrap_deprecated(old, new)(fun) for old, new, fun in (
('TimeoutError', 'Timeout', Timeout),
('exc_after', 'greenthread.exc_after', greenthread.exc_after),
('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global),
))
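A minimal usage sketch of the API re-exported above; the worker function and pool size are illustrative, not part of the file:

import eventlet

def square(n):
    eventlet.sleep(0.01)  # cooperative yield to the hub
    return n * n

pool = eventlet.GreenPool(4)              # at most 4 greenthreads at once
print(list(pool.imap(square, range(5))))  # -> [0, 1, 4, 9, 16]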

View File

@@ -0,0 +1,16 @@
# file generated by setuptools_scm
# don't change, don't track in version control
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Tuple, Union
VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
VERSION_TUPLE = object
version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE
__version__ = version = '0.38.0'
__version_tuple__ = version_tuple = (0, 38, 0)

View File

@@ -0,0 +1,57 @@
"""
Asyncio compatibility functions.
"""
import asyncio
from greenlet import GreenletExit
from .greenthread import spawn, getcurrent
from .event import Event
from .hubs import get_hub
from .hubs.asyncio import Hub as AsyncioHub
__all__ = ["spawn_for_awaitable"]
def spawn_for_awaitable(coroutine):
"""
Take a coroutine or some other object that can be awaited
(``asyncio.Future``, ``asyncio.Task``), and turn it into a ``GreenThread``.
Known limitations:
* The coroutine/future/etc. don't run in their own
greenlet/``GreenThread``.
* As a result, things like ``eventlet.Lock``
won't work correctly inside ``async`` functions, thread ids aren't
meaningful, and so on.
"""
if not isinstance(get_hub(), AsyncioHub):
raise RuntimeError(
"This API only works with eventlet's asyncio hub. "
+ "To use it, set an EVENTLET_HUB=asyncio environment variable."
)
def _run():
# Convert the coroutine/Future/Task we're wrapping into a Future.
future = asyncio.ensure_future(coroutine, loop=asyncio.get_running_loop())
# Ensure killing the GreenThread cancels the Future:
def _got_result(gthread):
try:
gthread.wait()
except GreenletExit:
future.cancel()
getcurrent().link(_got_result)
# Wait until the Future has a result.
has_result = Event()
future.add_done_callback(lambda _: has_result.send(True))
has_result.wait()
# Return the result of the Future (or raise an exception if it had an
# exception).
return future.result()
# Start a GreenThread:
return spawn(_run)
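A sketch of how spawn_for_awaitable might be called; it assumes the process runs under the asyncio hub (EVENTLET_HUB=asyncio), as the guard above requires:

import asyncio
from eventlet.asyncio import spawn_for_awaitable

async def add(a, b):
    await asyncio.sleep(0.1)  # runs on the hub's asyncio event loop
    return a + b

gthread = spawn_for_awaitable(add(2, 3))  # GreenThread wrapping the coroutine
assert gthread.wait() == 5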

View File

@@ -0,0 +1,140 @@
from code import InteractiveConsole
import errno
import socket
import sys
import eventlet
from eventlet import hubs
from eventlet.support import greenlets, get_errno
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
try:
sys.ps2
except AttributeError:
sys.ps2 = '... '
class FileProxy:
def __init__(self, f):
self.f = f
def isatty(self):
return True
def flush(self):
pass
def write(self, data, *a, **kw):
try:
self.f.write(data, *a, **kw)
self.f.flush()
except OSError as e:
if get_errno(e) != errno.EPIPE:
raise
def readline(self, *a):
return self.f.readline(*a).replace('\r\n', '\n')
def __getattr__(self, attr):
return getattr(self.f, attr)
# @@tavis: the `locals` args below mask the built-in function. Should
# be renamed.
class SocketConsole(greenlets.greenlet):
def __init__(self, desc, hostport, locals):
self.hostport = hostport
self.locals = locals
# mangle the socket
self.desc = FileProxy(desc)
greenlets.greenlet.__init__(self)
def run(self):
try:
console = InteractiveConsole(self.locals)
console.interact()
finally:
self.switch_out()
self.finalize()
def switch(self, *args, **kw):
self.saved = sys.stdin, sys.stderr, sys.stdout
sys.stdin = sys.stdout = sys.stderr = self.desc
greenlets.greenlet.switch(self, *args, **kw)
def switch_out(self):
sys.stdin, sys.stderr, sys.stdout = self.saved
def finalize(self):
# restore the state of the socket
self.desc = None
if len(self.hostport) >= 2:
host = self.hostport[0]
port = self.hostport[1]
print("backdoor closed to %s:%s" % (host, port,))
else:
print('backdoor closed')
def backdoor_server(sock, locals=None):
""" Blocking function that runs a backdoor server on the socket *sock*,
accepting connections and running backdoor consoles for each client that
connects.
The *locals* argument is a dictionary that will be included in the locals()
of the interpreters. It can be convenient to stick important application
variables in here.
"""
listening_on = sock.getsockname()
if sock.family == socket.AF_INET:
# Expand result to IP + port
listening_on = '%s:%s' % listening_on
elif sock.family == socket.AF_INET6:
ip, port, _, _ = listening_on
listening_on = '%s:%s' % (ip, port,)
# No action needed if sock.family == socket.AF_UNIX
print("backdoor server listening on %s" % (listening_on,))
try:
while True:
socketpair = None
try:
socketpair = sock.accept()
backdoor(socketpair, locals)
except OSError as e:
# Broken pipe means it was shutdown
if get_errno(e) != errno.EPIPE:
raise
finally:
if socketpair:
socketpair[0].close()
finally:
sock.close()
def backdoor(conn_info, locals=None):
"""Sets up an interactive console on a socket with a single connected
client. This does not block the caller, as it spawns a new greenlet to
handle the console. This is meant to be called from within an accept loop
(such as backdoor_server).
"""
conn, addr = conn_info
if conn.family == socket.AF_INET:
host, port = addr
print("backdoor to %s:%s" % (host, port))
elif conn.family == socket.AF_INET6:
host, port, _, _ = addr
print("backdoor to %s:%s" % (host, port))
else:
print('backdoor opened')
fl = conn.makefile("rw")
console = SocketConsole(fl, addr, locals)
hub = hubs.get_hub()
hub.schedule_call_global(0, console.switch)
if __name__ == '__main__':
backdoor_server(eventlet.listen(('127.0.0.1', 9000)), {})
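A typical embedding sketch: spawn backdoor_server so it doesn't block the application; the port and the locals dict are illustrative:

import eventlet
from eventlet import backdoor

# Expose a REPL on localhost:3000; attach with `telnet 127.0.0.1 3000`.
eventlet.spawn(backdoor.backdoor_server,
               eventlet.listen(('127.0.0.1', 3000)),
               locals={'answer': 42})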

View File

@@ -0,0 +1,190 @@
import sys
import warnings
from eventlet import greenpool
from eventlet import greenthread
from eventlet import support
from eventlet.green import socket
from eventlet.support import greenlets as greenlet
def connect(addr, family=socket.AF_INET, bind=None):
"""Convenience function for opening client sockets.
:param addr: Address of the server to connect to. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param bind: Local address to bind to, optional.
:return: The connected green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
if bind is not None:
sock.bind(bind)
sock.connect(addr)
return sock
class ReuseRandomPortWarning(Warning):
pass
class ReusePortUnavailableWarning(Warning):
pass
def listen(addr, family=socket.AF_INET, backlog=50, reuse_addr=True, reuse_port=None):
"""Convenience function for opening server sockets. This
socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.
Sets SO_REUSEADDR on the socket to save on annoyance.
:param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param backlog:
The maximum number of queued connections. Should be at least 1; the maximum
value is system-dependent.
:return: The listening green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
if reuse_addr and sys.platform[:3] != 'win':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if family in (socket.AF_INET, socket.AF_INET6) and addr[1] == 0:
if reuse_port:
warnings.warn(
'''listen on random port (0) with SO_REUSEPORT is dangerous.
Double check your intent.
Example problem: https://github.com/eventlet/eventlet/issues/411''',
ReuseRandomPortWarning, stacklevel=3)
elif reuse_port is None:
reuse_port = True
if reuse_port and hasattr(socket, 'SO_REUSEPORT'):
# NOTE(zhengwei): linux kernel >= 3.9
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# OSError is enough on Python 3+
except OSError as ex:
if support.get_errno(ex) in (22, 92):
# A famous platform defines unsupported socket option.
# https://github.com/eventlet/eventlet/issues/380
# https://github.com/eventlet/eventlet/issues/418
warnings.warn(
'''socket.SO_REUSEPORT is defined but not supported.
On Windows: known bug, wontfix.
On other systems: please comment in the issue linked below.
More information: https://github.com/eventlet/eventlet/issues/380''',
ReusePortUnavailableWarning, stacklevel=3)
sock.bind(addr)
sock.listen(backlog)
return sock
class StopServe(Exception):
"""Exception class used for quitting :func:`~eventlet.serve` gracefully."""
pass
def _stop_checker(t, server_gt, conn):
try:
try:
t.wait()
finally:
conn.close()
except greenlet.GreenletExit:
pass
except Exception:
greenthread.kill(server_gt, *sys.exc_info())
def serve(sock, handle, concurrency=1000):
"""Runs a server on the supplied socket. Calls the function *handle* in a
separate greenthread for every incoming client connection. *handle* takes
two arguments: the client socket object, and the client address::
def myhandle(client_sock, client_addr):
print("client connected", client_addr)
eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)
Returning from *handle* closes the client socket.
:func:`serve` blocks the calling greenthread; it won't return until
the server completes. If you desire an immediate return,
spawn a new greenthread for :func:`serve`.
Any uncaught exceptions raised in *handle* are raised as exceptions
from :func:`serve`, terminating the server, so be sure to be aware of the
exceptions your application can raise. The return value of *handle* is
ignored.
    Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
    server -- that's the only way to get the :func:`serve` function to return
    rather than raise.
The value in *concurrency* controls the maximum number of
greenthreads that will be open at any time handling requests. When
the server hits the concurrency limit, it stops accepting new
connections until the existing ones complete.
"""
pool = greenpool.GreenPool(concurrency)
server_gt = greenthread.getcurrent()
while True:
try:
conn, addr = sock.accept()
gt = pool.spawn(handle, conn, addr)
gt.link(_stop_checker, server_gt, conn)
conn, addr, gt = None, None, None
except StopServe:
return
def wrap_ssl(sock, *a, **kw):
"""Convenience function for converting a regular socket into an
SSL socket. Has the same interface as :func:`ssl.wrap_socket`,
but can also use PyOpenSSL. Though, note that it ignores the
`cert_reqs`, `ssl_version`, `ca_certs`, `do_handshake_on_connect`,
and `suppress_ragged_eofs` arguments when using PyOpenSSL.
The preferred idiom is to call wrap_ssl directly on the creation
method, e.g., ``wrap_ssl(connect(addr))`` or
``wrap_ssl(listen(addr), server_side=True)``. This way there is
no "naked" socket sitting around to accidentally corrupt the SSL
session.
    :return: Green SSL object.
"""
return wrap_ssl_impl(sock, *a, **kw)
try:
from eventlet.green import ssl
wrap_ssl_impl = ssl.wrap_socket
except ImportError:
# trying PyOpenSSL
try:
from eventlet.green.OpenSSL import SSL
except ImportError:
def wrap_ssl_impl(*a, **kw):
raise ImportError(
"To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.7 or later.")
else:
def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False,
cert_reqs=None, ssl_version=None, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, ciphers=None):
# theoretically the ssl_version could be respected in this line
context = SSL.Context(SSL.SSLv23_METHOD)
if certfile is not None:
context.use_certificate_file(certfile)
if keyfile is not None:
context.use_privatekey_file(keyfile)
context.set_verify(SSL.VERIFY_NONE, lambda *x: True)
connection = SSL.Connection(context, sock)
if server_side:
connection.set_accept_state()
else:
connection.set_connect_state()
return connection
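An echo-server sketch combining listen() and serve() from this file; the port is arbitrary:

import eventlet

def echo(client, addr):
    # returning from the handler makes serve() close the client socket
    while True:
        data = client.recv(1024)
        if not data:
            break
        client.sendall(data)

eventlet.serve(eventlet.listen(('127.0.0.1', 6000)), echo)  # blocks until StopServe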

View File

@@ -0,0 +1,53 @@
import weakref
from eventlet import greenthread
__all__ = ['get_ident', 'local']
def get_ident():
""" Returns ``id()`` of current greenlet. Useful for debugging."""
return id(greenthread.getcurrent())
# the entire purpose of this class is to store off the constructor
# arguments in a local variable without calling __init__ directly
class _localbase:
__slots__ = '_local__args', '_local__greens'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__greens', weakref.WeakKeyDictionary())
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
return self
def _patch(thrl):
greens = object.__getattribute__(thrl, '_local__greens')
# until we can store the localdict on greenlets themselves,
# we store it in _local__greens on the local object
cur = greenthread.getcurrent()
if cur not in greens:
# must be the first time we've seen this greenlet, call __init__
greens[cur] = {}
cls = type(thrl)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(thrl, '_local__args')
thrl.__init__(*args, **kw)
object.__setattr__(thrl, '__dict__', greens[cur])
class local(_localbase):
def __getattribute__(self, attr):
_patch(self)
return object.__getattribute__(self, attr)
def __setattr__(self, attr, value):
_patch(self)
return object.__setattr__(self, attr, value)
def __delattr__(self, attr):
_patch(self)
return object.__delattr__(self, attr)
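A sketch showing that attributes on a local instance are private to each greenthread:

import eventlet
from eventlet import corolocal

data = corolocal.local()

def worker(n):
    data.value = n       # visible only in this greenthread
    eventlet.sleep(0)    # yield so the other workers run
    assert data.value == n

workers = [eventlet.spawn(worker, i) for i in range(3)]
for w in workers:
    w.wait()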

View File

@@ -0,0 +1,59 @@
from eventlet import event as _event
class metaphore:
"""This is sort of an inverse semaphore: a counter that starts at 0 and
waits only if nonzero. It's used to implement a "wait for all" scenario.
>>> from eventlet import coros, spawn_n
>>> count = coros.metaphore()
>>> count.wait()
>>> def decrementer(count, id):
... print("{0} decrementing".format(id))
... count.dec()
...
>>> _ = spawn_n(decrementer, count, 'A')
>>> _ = spawn_n(decrementer, count, 'B')
>>> count.inc(2)
>>> count.wait()
A decrementing
B decrementing
"""
def __init__(self):
self.counter = 0
self.event = _event.Event()
# send() right away, else we'd wait on the default 0 count!
self.event.send()
def inc(self, by=1):
"""Increment our counter. If this transitions the counter from zero to
nonzero, make any subsequent :meth:`wait` call wait.
"""
assert by > 0
self.counter += by
if self.counter == by:
# If we just incremented self.counter by 'by', and the new count
# equals 'by', then the old value of self.counter was 0.
# Transitioning from 0 to a nonzero value means wait() must
# actually wait.
self.event.reset()
def dec(self, by=1):
"""Decrement our counter. If this transitions the counter from nonzero
to zero, a current or subsequent wait() call need no longer wait.
"""
assert by > 0
self.counter -= by
if self.counter <= 0:
# Don't leave self.counter < 0, that will screw things up in
# future calls.
self.counter = 0
# Transitioning from nonzero to 0 means wait() need no longer wait.
self.event.send()
def wait(self):
"""Suspend the caller only if our count is nonzero. In that case,
resume the caller once the count decrements to zero again.
"""
self.event.wait()

View File

@@ -0,0 +1,601 @@
# @file dagpool.py
# @author Nat Goodspeed
# @date 2016-08-08
# @brief Provide DAGPool class
from eventlet.event import Event
from eventlet import greenthread
import collections
# value distinguished from any other Python value including None
_MISSING = object()
class Collision(Exception):
"""
DAGPool raises Collision when you try to launch two greenthreads with the
same key, or post() a result for a key corresponding to a greenthread, or
post() twice for the same key. As with KeyError, str(collision) names the
key in question.
"""
pass
class PropagateError(Exception):
"""
When a DAGPool greenthread terminates with an exception instead of
returning a result, attempting to retrieve its value raises
PropagateError.
Attributes:
key
the key of the greenthread which raised the exception
exc
the exception object raised by the greenthread
"""
def __init__(self, key, exc):
# initialize base class with a reasonable string message
msg = "PropagateError({}): {}: {}" \
.format(key, exc.__class__.__name__, exc)
super().__init__(msg)
self.msg = msg
# Unless we set args, this is unpickleable:
# https://bugs.python.org/issue1692335
self.args = (key, exc)
self.key = key
self.exc = exc
def __str__(self):
return self.msg
class DAGPool:
"""
A DAGPool is a pool that constrains greenthreads, not by max concurrency,
but by data dependencies.
This is a way to implement general DAG dependencies. A simple dependency
tree (flowing in either direction) can straightforwardly be implemented
using recursion and (e.g.)
:meth:`GreenThread.imap() <eventlet.greenthread.GreenThread.imap>`.
What gets complicated is when a given node depends on several other nodes
as well as contributing to several other nodes.
With DAGPool, you concurrently launch all applicable greenthreads; each
will proceed as soon as it has all required inputs. The DAG is implicit in
which items are required by each greenthread.
Each greenthread is launched in a DAGPool with a key: any value that can
serve as a Python dict key. The caller also specifies an iterable of other
keys on which this greenthread depends. This iterable may be empty.
The greenthread callable must accept (key, results), where:
key
is its own key
results
is an iterable of (key, value) pairs.
A newly-launched DAGPool greenthread is entered immediately, and can
perform any necessary setup work. At some point it will iterate over the
(key, value) pairs from the passed 'results' iterable. Doing so blocks the
greenthread until a value is available for each of the keys specified in
its initial dependencies iterable. These (key, value) pairs are delivered
in chronological order, *not* the order in which they are initially
specified: each value will be delivered as soon as it becomes available.
The value returned by a DAGPool greenthread becomes the value for its
key, which unblocks any other greenthreads waiting on that key.
If a DAGPool greenthread terminates with an exception instead of returning
a value, attempting to retrieve the value raises :class:`PropagateError`,
which binds the key of the original greenthread and the original
exception. Unless the greenthread attempting to retrieve the value handles
PropagateError, that exception will in turn be wrapped in a PropagateError
of its own, and so forth. The code that ultimately handles PropagateError
can follow the chain of PropagateError.exc attributes to discover the flow
of that exception through the DAG of greenthreads.
External greenthreads may also interact with a DAGPool. See :meth:`wait_each`,
:meth:`waitall`, :meth:`post`.
It is not recommended to constrain external DAGPool producer greenthreads
in a :class:`GreenPool <eventlet.greenpool.GreenPool>`: it may be hard to
provably avoid deadlock.
.. automethod:: __init__
.. automethod:: __getitem__
"""
_Coro = collections.namedtuple("_Coro", ("greenthread", "pending"))
def __init__(self, preload={}):
"""
DAGPool can be prepopulated with an initial dict or iterable of (key,
value) pairs. These (key, value) pairs are of course immediately
available for any greenthread that depends on any of those keys.
"""
try:
# If a dict is passed, copy it. Don't risk a subsequent
# modification to passed dict affecting our internal state.
iteritems = preload.items()
except AttributeError:
# Not a dict, just an iterable of (key, value) pairs
iteritems = preload
# Load the initial dict
self.values = dict(iteritems)
# track greenthreads
self.coros = {}
# The key to blocking greenthreads is the Event.
self.event = Event()
def waitall(self):
"""
waitall() blocks the calling greenthread until there is a value for
every DAGPool greenthread launched by :meth:`spawn`. It returns a dict
containing all :class:`preload data <DAGPool>`, all data from
:meth:`post` and all values returned by spawned greenthreads.
See also :meth:`wait`.
"""
# waitall() is an alias for compatibility with GreenPool
return self.wait()
def wait(self, keys=_MISSING):
"""
*keys* is an optional iterable of keys. If you omit the argument, it
waits for all the keys from :class:`preload data <DAGPool>`, from
:meth:`post` calls and from :meth:`spawn` calls: in other words, all
the keys of which this DAGPool is aware.
wait() blocks the calling greenthread until all of the relevant keys
have values. wait() returns a dict whose keys are the relevant keys,
and whose values come from the *preload* data, from values returned by
DAGPool greenthreads or from :meth:`post` calls.
If a DAGPool greenthread terminates with an exception, wait() will
raise :class:`PropagateError` wrapping that exception. If more than
one greenthread terminates with an exception, it is indeterminate
which one wait() will raise.
If an external greenthread posts a :class:`PropagateError` instance,
wait() will raise that PropagateError. If more than one greenthread
posts PropagateError, it is indeterminate which one wait() will raise.
See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
"""
# This is mostly redundant with wait_each() functionality.
return dict(self.wait_each(keys))
def wait_each(self, keys=_MISSING):
"""
*keys* is an optional iterable of keys. If you omit the argument, it
waits for all the keys from :class:`preload data <DAGPool>`, from
:meth:`post` calls and from :meth:`spawn` calls: in other words, all
the keys of which this DAGPool is aware.
wait_each() is a generator producing (key, value) pairs as a value
becomes available for each requested key. wait_each() blocks the
calling greenthread until the next value becomes available. If the
DAGPool was prepopulated with values for any of the relevant keys, of
course those can be delivered immediately without waiting.
Delivery order is intentionally decoupled from the initial sequence of
keys: each value is delivered as soon as it becomes available. If
multiple keys are available at the same time, wait_each() delivers
each of the ready ones in arbitrary order before blocking again.
The DAGPool does not distinguish between a value returned by one of
its own greenthreads and one provided by a :meth:`post` call or *preload* data.
The wait_each() generator terminates (raises StopIteration) when all
specified keys have been delivered. Thus, typical usage might be:
::
for key, value in dagpool.wait_each(keys):
# process this ready key and value
# continue processing now that we've gotten values for all keys
By implication, if you pass wait_each() an empty iterable of keys, it
returns immediately without yielding anything.
If the value to be delivered is a :class:`PropagateError` exception object, the
generator raises that PropagateError instead of yielding it.
See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
"""
# Build a local set() and then call _wait_each().
return self._wait_each(self._get_keyset_for_wait_each(keys))
def wait_each_success(self, keys=_MISSING):
"""
wait_each_success() filters results so that only success values are
yielded. In other words, unlike :meth:`wait_each`, wait_each_success()
will not raise :class:`PropagateError`. Not every provided (or
defaulted) key will necessarily be represented, though naturally the
generator will not finish until all have completed.
In all other respects, wait_each_success() behaves like :meth:`wait_each`.
"""
for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
if not isinstance(value, PropagateError):
yield key, value
def wait_each_exception(self, keys=_MISSING):
"""
wait_each_exception() filters results so that only exceptions are
yielded. Not every provided (or defaulted) key will necessarily be
represented, though naturally the generator will not finish until
all have completed.
Unlike other DAGPool methods, wait_each_exception() simply yields
:class:`PropagateError` instances as values rather than raising them.
In all other respects, wait_each_exception() behaves like :meth:`wait_each`.
"""
for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
if isinstance(value, PropagateError):
yield key, value
def _get_keyset_for_wait_each(self, keys):
"""
wait_each(), wait_each_success() and wait_each_exception() promise
that if you pass an iterable of keys, the method will wait for results
from those keys -- but if you omit the keys argument, the method will
wait for results from all known keys. This helper implements that
distinction, returning a set() of the relevant keys.
"""
if keys is not _MISSING:
return set(keys)
else:
# keys arg omitted -- use all the keys we know about
return set(self.coros.keys()) | set(self.values.keys())
def _wait_each(self, pending):
"""
When _wait_each() encounters a value of PropagateError, it raises it.
In all other respects, _wait_each() behaves like _wait_each_raw().
"""
for key, value in self._wait_each_raw(pending):
yield key, self._value_or_raise(value)
@staticmethod
def _value_or_raise(value):
# Most methods attempting to deliver PropagateError should raise that
# instead of simply returning it.
if isinstance(value, PropagateError):
raise value
return value
def _wait_each_raw(self, pending):
"""
pending is a set() of keys for which we intend to wait. THIS SET WILL
BE DESTRUCTIVELY MODIFIED: as each key acquires a value, that key will
be removed from the passed 'pending' set.
_wait_each_raw() does not treat a PropagateError instance specially:
it will be yielded to the caller like any other value.
In all other respects, _wait_each_raw() behaves like wait_each().
"""
while True:
# Before even waiting, show caller any (key, value) pairs that
# are already available. Copy 'pending' because we want to be able
# to remove items from the original set while iterating.
for key in pending.copy():
value = self.values.get(key, _MISSING)
if value is not _MISSING:
# found one, it's no longer pending
pending.remove(key)
yield (key, value)
if not pending:
# Once we've yielded all the caller's keys, done.
break
# There are still more keys pending, so wait.
self.event.wait()
def spawn(self, key, depends, function, *args, **kwds):
"""
Launch the passed *function(key, results, ...)* as a greenthread,
passing it:
- the specified *key*
- an iterable of (key, value) pairs
- whatever other positional args or keywords you specify.
Iterating over the *results* iterable behaves like calling
:meth:`wait_each(depends) <DAGPool.wait_each>`.
Returning from *function()* behaves like
:meth:`post(key, return_value) <DAGPool.post>`.
If *function()* terminates with an exception, that exception is wrapped
in :class:`PropagateError` with the greenthread's *key* and (effectively) posted
as the value for that key. Attempting to retrieve that value will
raise that PropagateError.
Thus, if the greenthread with key 'a' terminates with an exception,
and greenthread 'b' depends on 'a', when greenthread 'b' attempts to
iterate through its *results* argument, it will encounter
PropagateError. So by default, an uncaught exception will propagate
through all the downstream dependencies.
If you pass :meth:`spawn` a key already passed to spawn() or :meth:`post`, spawn()
raises :class:`Collision`.
"""
if key in self.coros or key in self.values:
raise Collision(key)
# The order is a bit tricky. First construct the set() of keys.
pending = set(depends)
# It's important that we pass to _wait_each() the same 'pending' set()
# that we store in self.coros for this key. The generator-iterator
# returned by _wait_each() becomes the function's 'results' iterable.
newcoro = greenthread.spawn(self._wrapper, function, key,
self._wait_each(pending),
*args, **kwds)
# Also capture the same (!) set in the new _Coro object for this key.
# We must be able to observe ready keys being removed from the set.
self.coros[key] = self._Coro(newcoro, pending)
def _wrapper(self, function, key, results, *args, **kwds):
"""
This wrapper runs the top-level function in a DAGPool greenthread,
posting its return value (or PropagateError) to the DAGPool.
"""
try:
# call our passed function
result = function(key, results, *args, **kwds)
except Exception as err:
# Wrap any exception it may raise in a PropagateError.
result = PropagateError(key, err)
finally:
# function() has returned (or terminated with an exception). We no
# longer need to track this greenthread in self.coros. Remove it
# first so post() won't complain about a running greenthread.
del self.coros[key]
try:
# as advertised, try to post() our return value
self.post(key, result)
except Collision:
# if we've already post()ed a result, oh well
pass
# also, in case anyone cares...
return result
def spawn_many(self, depends, function, *args, **kwds):
"""
spawn_many() accepts a single *function* whose parameters are the same
as for :meth:`spawn`.
The difference is that spawn_many() accepts a dependency dict
*depends*. A new greenthread is spawned for each key in the dict. That
dict key's value should be an iterable of other keys on which this
greenthread depends.
If the *depends* dict contains any key already passed to :meth:`spawn`
or :meth:`post`, spawn_many() raises :class:`Collision`. It is
indeterminate how many of the other keys in *depends* will have
successfully spawned greenthreads.
"""
# Iterate over 'depends' items, relying on self.spawn() not to
# context-switch so no one can modify 'depends' along the way.
for key, deps in depends.items():
self.spawn(key, deps, function, *args, **kwds)
def kill(self, key):
"""
Kill the greenthread that was spawned with the specified *key*.
If no such greenthread was spawned, raise KeyError.
"""
# let KeyError, if any, propagate
self.coros[key].greenthread.kill()
# once killed, remove it
del self.coros[key]
def post(self, key, value, replace=False):
"""
post(key, value) stores the passed *value* for the passed *key*. It
then causes each greenthread blocked on its results iterable, or on
:meth:`wait_each(keys) <DAGPool.wait_each>`, to check for new values.
A waiting greenthread might not literally resume on every single
post() of a relevant key, but the first post() of a relevant key
ensures that it will resume eventually, and when it does it will catch
up with all relevant post() calls.
Calling post(key, value) when there is a running greenthread with that
same *key* raises :class:`Collision`. If you must post(key, value) instead of
letting the greenthread run to completion, you must first call
:meth:`kill(key) <DAGPool.kill>`.
The DAGPool implicitly post()s the return value from each of its
greenthreads. But a greenthread may explicitly post() a value for its
own key, which will cause its return value to be discarded.
Calling post(key, value, replace=False) (the default *replace*) when a
value for that key has already been posted, by any means, raises
:class:`Collision`.
Calling post(key, value, replace=True) when a value for that key has
already been posted, by any means, replaces the previously-stored
value. However, that may make it complicated to reason about the
behavior of greenthreads waiting on that key.
After a post(key, value1) followed by post(key, value2, replace=True),
it is unspecified which pending :meth:`wait_each([key...]) <DAGPool.wait_each>`
calls (or greenthreads iterating over *results* involving that key)
will observe *value1* versus *value2*. It is guaranteed that
subsequent wait_each([key...]) calls (or greenthreads spawned after
that point) will observe *value2*.
A successful call to
post(key, :class:`PropagateError(key, ExceptionSubclass) <PropagateError>`)
ensures that any subsequent attempt to retrieve that key's value will
raise that PropagateError instance.
"""
# First, check if we're trying to post() to a key with a running
# greenthread.
# A DAGPool greenthread is explicitly permitted to post() to its
# OWN key.
coro = self.coros.get(key, _MISSING)
if coro is not _MISSING and coro.greenthread is not greenthread.getcurrent():
# oh oh, trying to post a value for running greenthread from
# some other greenthread
raise Collision(key)
# Here, either we're posting a value for a key with no greenthread or
# we're posting from that greenthread itself.
# Has somebody already post()ed a value for this key?
# Unless replace == True, this is a problem.
if key in self.values and not replace:
raise Collision(key)
# Either we've never before posted a value for this key, or we're
# posting with replace == True.
# update our database
self.values[key] = value
# and wake up pending waiters
self.event.send()
# The comment in Event.reset() says: "it's better to create a new
# event rather than reset an old one". Okay, fine. We do want to be
# able to support new waiters, so create a new Event.
self.event = Event()
def __getitem__(self, key):
"""
__getitem__(key) (aka dagpool[key]) blocks until *key* has a value,
then delivers that value.
"""
# This is a degenerate case of wait_each(). Construct a tuple
# containing only this 'key'. wait_each() will yield exactly one (key,
# value) pair. Return just its value.
for _, value in self.wait_each((key,)):
return value
def get(self, key, default=None):
"""
get() returns the value for *key*. If *key* does not yet have a value,
get() returns *default*.
"""
return self._value_or_raise(self.values.get(key, default))
def keys(self):
"""
Return a snapshot tuple of keys for which we currently have values.
"""
# Explicitly return a copy rather than an iterator: don't assume our
# caller will finish iterating before new values are posted.
return tuple(self.values.keys())
def items(self):
"""
Return a snapshot tuple of currently-available (key, value) pairs.
"""
# Don't assume our caller will finish iterating before new values are
# posted.
return tuple((key, self._value_or_raise(value))
for key, value in self.values.items())
def running(self):
"""
Return number of running DAGPool greenthreads. This includes
greenthreads blocked while iterating through their *results* iterable,
that is, greenthreads waiting on values from other keys.
"""
return len(self.coros)
def running_keys(self):
"""
Return keys for running DAGPool greenthreads. This includes
greenthreads blocked while iterating through their *results* iterable,
that is, greenthreads waiting on values from other keys.
"""
# return snapshot; don't assume caller will finish iterating before we
# next modify self.coros
return tuple(self.coros.keys())
def waiting(self):
"""
Return number of waiting DAGPool greenthreads, that is, greenthreads
still waiting on values from other keys. This explicitly does *not*
include external greenthreads waiting on :meth:`wait`,
:meth:`waitall`, :meth:`wait_each`.
"""
# n.b. if Event would provide a count of its waiters, we could say
# something about external greenthreads as well.
# The logic to determine this count is exactly the same as the general
# waiting_for() call.
return len(self.waiting_for())
# Use _MISSING instead of None as the default 'key' param so we can permit
# None as a supported key.
def waiting_for(self, key=_MISSING):
"""
waiting_for(key) returns a set() of the keys for which the DAGPool
greenthread spawned with that *key* is still waiting. If you pass a
*key* for which no greenthread was spawned, waiting_for() raises
KeyError.
waiting_for() without argument returns a dict. Its keys are the keys
of DAGPool greenthreads still waiting on one or more values. In the
returned dict, the value of each such key is the set of other keys for
which that greenthread is still waiting.
This method allows diagnosing a "hung" DAGPool. If certain
greenthreads are making no progress, it's possible that they are
waiting on keys for which there is no greenthread and no :meth:`post` data.
"""
# We may have greenthreads whose 'pending' entry indicates they're
# waiting on some keys even though values have now been posted for
# some or all of those keys, because those greenthreads have not yet
# regained control since values were posted. So make a point of
# excluding values that are now available.
available = set(self.values.keys())
if key is not _MISSING:
# waiting_for(key) is semantically different than waiting_for().
# It's just that they both seem to want the same method name.
coro = self.coros.get(key, _MISSING)
if coro is _MISSING:
# Hmm, no running greenthread with this key. But was there
# EVER a greenthread with this key? If not, let KeyError
# propagate.
self.values[key]
# Oh good, there's a value for this key. Either the
# greenthread finished, or somebody posted a value. Just say
# the greenthread isn't waiting for anything.
return set()
else:
# coro is the _Coro for the running greenthread with the
# specified key.
return coro.pending - available
# This is a waiting_for() call, i.e. a general query rather than for a
# specific key.
# Start by iterating over (key, coro) pairs in self.coros. Generate
# (key, pending) pairs in which 'pending' is the set of keys on which
# the greenthread believes it's waiting, minus the set of keys that
# are now available. Filter out any pair in which 'pending' is empty,
# that is, that greenthread will be unblocked next time it resumes.
# Make a dict from those pairs.
return {key: pending
for key, pending in ((key, (coro.pending - available))
for key, coro in self.coros.items())
if pending}
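A small dependency-graph sketch for DAGPool; the keys and the summing node function are illustrative:

from eventlet.dagpool import DAGPool

def node(key, results, scale=1):
    # 'results' yields (key, value) pairs as upstream values arrive
    return scale * sum(value for _, value in results)

pool = DAGPool(preload={'a': 1, 'b': 2})
pool.spawn('c', ('a', 'b'), node)        # c = a + b = 3
pool.spawn('d', ('c',), node, scale=10)  # d = 10 * c = 30
print(pool.waitall())  # {'a': 1, 'b': 2, 'c': 3, 'd': 30}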

View File

@@ -0,0 +1,460 @@
from collections import deque
from contextlib import contextmanager
import sys
import time
from eventlet.pools import Pool
from eventlet import timeout
from eventlet import hubs
from eventlet.hubs.timer import Timer
from eventlet.greenthread import GreenThread
_MISSING = object()
class ConnectTimeout(Exception):
pass
def cleanup_rollback(conn):
conn.rollback()
class BaseConnectionPool(Pool):
def __init__(self, db_module,
min_size=0, max_size=4,
max_idle=10, max_age=30,
connect_timeout=5,
cleanup=cleanup_rollback,
*args, **kwargs):
"""
Constructs a pool with at least *min_size* connections and at most
*max_size* connections. Uses *db_module* to construct new connections.
The *max_idle* parameter determines how long pooled connections can
remain idle, in seconds. After *max_idle* seconds have elapsed
without the connection being used, the pool closes the connection.
*max_age* is how long any particular connection is allowed to live.
Connections that have been open for longer than *max_age* seconds are
closed, regardless of idle time. If *max_age* is 0, all connections are
closed on return to the pool, reducing it to a concurrency limiter.
*connect_timeout* is the duration in seconds that the pool will wait
before timing out on connect() to the database. If triggered, the
timeout will raise a ConnectTimeout from get().
The remainder of the arguments are used as parameters to the
*db_module*'s connection constructor.
"""
assert(db_module)
self._db_module = db_module
self._args = args
self._kwargs = kwargs
self.max_idle = max_idle
self.max_age = max_age
self.connect_timeout = connect_timeout
self._expiration_timer = None
self.cleanup = cleanup
super().__init__(min_size=min_size, max_size=max_size, order_as_stack=True)
def _schedule_expiration(self):
"""Sets up a timer that will call _expire_old_connections when the
oldest connection currently in the free pool is ready to expire. This
        is the earliest possible time that a connection could expire; thus the
        timer runs as infrequently as possible without missing a possible
        expiration.
If this function is called when a timer is already scheduled, it does
nothing.
If max_age or max_idle is 0, _schedule_expiration likewise does nothing.
"""
if self.max_age == 0 or self.max_idle == 0:
# expiration is unnecessary because all connections will be expired
# on put
return
if (self._expiration_timer is not None
and not getattr(self._expiration_timer, 'called', False)):
# the next timer is already scheduled
return
try:
now = time.time()
self._expire_old_connections(now)
# the last item in the list, because of the stack ordering,
# is going to be the most-idle
idle_delay = (self.free_items[-1][0] - now) + self.max_idle
oldest = min([t[1] for t in self.free_items])
age_delay = (oldest - now) + self.max_age
next_delay = min(idle_delay, age_delay)
except (IndexError, ValueError):
# no free items, unschedule ourselves
self._expiration_timer = None
return
if next_delay > 0:
# set up a continuous self-calling loop
self._expiration_timer = Timer(next_delay, GreenThread(hubs.get_hub().greenlet).switch,
self._schedule_expiration, [], {})
self._expiration_timer.schedule()
def _expire_old_connections(self, now):
"""Iterates through the open connections contained in the pool, closing
ones that have remained idle for longer than max_idle seconds, or have
been in existence for longer than max_age seconds.
*now* is the current time, as returned by time.time().
"""
original_count = len(self.free_items)
expired = [
conn
for last_used, created_at, conn in self.free_items
if self._is_expired(now, last_used, created_at)]
new_free = [
(last_used, created_at, conn)
for last_used, created_at, conn in self.free_items
if not self._is_expired(now, last_used, created_at)]
self.free_items.clear()
self.free_items.extend(new_free)
# adjust the current size counter to account for expired
# connections
self.current_size -= original_count - len(self.free_items)
for conn in expired:
self._safe_close(conn, quiet=True)
def _is_expired(self, now, last_used, created_at):
"""Returns true and closes the connection if it's expired.
"""
if (self.max_idle <= 0 or self.max_age <= 0
or now - last_used > self.max_idle
or now - created_at > self.max_age):
return True
return False
def _unwrap_connection(self, conn):
"""If the connection was wrapped by a subclass of
BaseConnectionWrapper and is still functional (as determined
by the __nonzero__, or __bool__ in python3, method), returns
the unwrapped connection. If anything goes wrong with this
process, returns None.
"""
base = None
try:
if conn:
base = conn._base
conn._destroy()
else:
base = None
except AttributeError:
pass
return base
def _safe_close(self, conn, quiet=False):
"""Closes the (already unwrapped) connection, squelching any
exceptions.
"""
try:
conn.close()
except AttributeError:
pass # conn is None, or junk
except Exception:
if not quiet:
print("Connection.close raised: %s" % (sys.exc_info()[1]))
def get(self):
conn = super().get()
# None is a flag value that means that put got called with
# something it couldn't use
if conn is None:
try:
conn = self.create()
except Exception:
                # unconditionally shrink the pool's size counter because
                # even if there are waiters, doing a full put
                # would incur a greenlet switch and thus lose the
                # exception stack
self.current_size -= 1
raise
# if the call to get() draws from the free pool, it will come
# back as a tuple
if isinstance(conn, tuple):
_last_used, created_at, conn = conn
else:
created_at = time.time()
# wrap the connection so the consumer can call close() safely
wrapped = PooledConnectionWrapper(conn, self)
# annotating the wrapper so that when it gets put in the pool
# again, we'll know how old it is
wrapped._db_pool_created_at = created_at
return wrapped
def put(self, conn, cleanup=_MISSING):
created_at = getattr(conn, '_db_pool_created_at', 0)
now = time.time()
conn = self._unwrap_connection(conn)
if self._is_expired(now, now, created_at):
self._safe_close(conn, quiet=False)
conn = None
elif cleanup is not None:
if cleanup is _MISSING:
cleanup = self.cleanup
# by default, call rollback in case the connection is in the middle
# of a transaction. However, rollback has performance implications
# so optionally do nothing or call something else like ping
try:
if conn:
cleanup(conn)
except Exception as e:
# we don't care what the exception was, we just know the
# connection is dead
print("WARNING: cleanup %s raised: %s" % (cleanup, e))
conn = None
except:
conn = None
raise
if conn is not None:
super().put((now, created_at, conn))
else:
# wake up any waiters with a flag value that indicates
# they need to manufacture a connection
if self.waiting() > 0:
super().put(None)
else:
# no waiters -- just change the size
self.current_size -= 1
self._schedule_expiration()
@contextmanager
def item(self, cleanup=_MISSING):
conn = self.get()
try:
yield conn
finally:
self.put(conn, cleanup=cleanup)
def clear(self):
"""Close all connections that this pool still holds a reference to,
and removes all references to them.
"""
if self._expiration_timer:
self._expiration_timer.cancel()
free_items, self.free_items = self.free_items, deque()
for item in free_items:
# Free items created using min_size>0 are not tuples.
conn = item[2] if isinstance(item, tuple) else item
self._safe_close(conn, quiet=True)
self.current_size -= 1
def __del__(self):
self.clear()
class TpooledConnectionPool(BaseConnectionPool):
"""A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
connections.
"""
def create(self):
now = time.time()
return now, now, self.connect(
self._db_module, self.connect_timeout, *self._args, **self._kwargs)
@classmethod
def connect(cls, db_module, connect_timeout, *args, **kw):
t = timeout.Timeout(connect_timeout, ConnectTimeout())
try:
from eventlet import tpool
conn = tpool.execute(db_module.connect, *args, **kw)
return tpool.Proxy(conn, autowrap_names=('cursor',))
finally:
t.cancel()
class RawConnectionPool(BaseConnectionPool):
"""A pool which gives out plain database connections.
"""
def create(self):
now = time.time()
return now, now, self.connect(
self._db_module, self.connect_timeout, *self._args, **self._kwargs)
@classmethod
def connect(cls, db_module, connect_timeout, *args, **kw):
t = timeout.Timeout(connect_timeout, ConnectTimeout())
try:
return db_module.connect(*args, **kw)
finally:
t.cancel()
# default connection pool is the tpool one
ConnectionPool = TpooledConnectionPool
class GenericConnectionWrapper:
def __init__(self, baseconn):
self._base = baseconn
# Proxy all method calls to self._base
# FIXME: remove repetition; options to consider:
# * for name in (...):
# setattr(class, name, lambda self, *a, **kw: getattr(self._base, name)(*a, **kw))
# * def __getattr__(self, name): if name in (...): return getattr(self._base, name)
# * other?
def __enter__(self):
return self._base.__enter__()
def __exit__(self, exc, value, tb):
return self._base.__exit__(exc, value, tb)
def __repr__(self):
return self._base.__repr__()
_proxy_funcs = (
'affected_rows',
'autocommit',
'begin',
'change_user',
'character_set_name',
'close',
'commit',
'cursor',
'dump_debug_info',
'errno',
'error',
'errorhandler',
'get_server_info',
'insert_id',
'literal',
'ping',
'query',
'rollback',
'select_db',
'server_capabilities',
'set_character_set',
'set_isolation_level',
'set_server_option',
'set_sql_mode',
'show_warnings',
'shutdown',
'sqlstate',
'stat',
'store_result',
'string_literal',
'thread_id',
'use_result',
'warning_count',
)
for _proxy_fun in GenericConnectionWrapper._proxy_funcs:
# excess wrapper for early binding (closure by value)
def _wrapper(_proxy_fun=_proxy_fun):
def _proxy_method(self, *args, **kwargs):
return getattr(self._base, _proxy_fun)(*args, **kwargs)
_proxy_method.func_name = _proxy_fun
_proxy_method.__name__ = _proxy_fun
_proxy_method.__qualname__ = 'GenericConnectionWrapper.' + _proxy_fun
return _proxy_method
setattr(GenericConnectionWrapper, _proxy_fun, _wrapper(_proxy_fun))
del GenericConnectionWrapper._proxy_funcs
del _proxy_fun
del _wrapper
class PooledConnectionWrapper(GenericConnectionWrapper):
"""A connection wrapper where:
- the close method returns the connection to the pool instead of closing it directly
- ``bool(conn)`` returns a reasonable value
- returns itself to the pool if it gets garbage collected
"""
def __init__(self, baseconn, pool):
super().__init__(baseconn)
self._pool = pool
def __nonzero__(self):
return (hasattr(self, '_base') and bool(self._base))
__bool__ = __nonzero__
def _destroy(self):
self._pool = None
try:
del self._base
except AttributeError:
pass
def close(self):
"""Return the connection to the pool, and remove the
reference to it so that you can't use it again through this
wrapper object.
"""
if self and self._pool:
self._pool.put(self)
self._destroy()
def __del__(self):
return # this causes some issues if __del__ is called in the
# main coroutine, so for now this is disabled
# self.close()
class DatabaseConnector:
"""
This is an object which will maintain a collection of database
connection pools on a per-host basis.
"""
def __init__(self, module, credentials,
conn_pool=None, *args, **kwargs):
"""constructor
*module*
Database module to use.
*credentials*
Mapping of hostname to connect arguments (e.g. username and password)
"""
assert(module)
self._conn_pool_class = conn_pool
if self._conn_pool_class is None:
self._conn_pool_class = ConnectionPool
self._module = module
self._args = args
self._kwargs = kwargs
# this is a map of hostname to username/password
self._credentials = credentials
self._databases = {}
def credentials_for(self, host):
if host in self._credentials:
return self._credentials[host]
else:
return self._credentials.get('default', None)
def get(self, host, dbname):
"""Returns a ConnectionPool to the target host and schema.
"""
key = (host, dbname)
if key not in self._databases:
new_kwargs = self._kwargs.copy()
new_kwargs['db'] = dbname
new_kwargs['host'] = host
new_kwargs.update(self.credentials_for(host))
dbpool = self._conn_pool_class(
self._module, *self._args, **new_kwargs)
self._databases[key] = dbpool
return self._databases[key]

View File

@@ -0,0 +1,218 @@
"""The debug module contains utilities and functions for better
debugging Eventlet-powered applications."""
import os
import sys
import linecache
import re
import inspect
__all__ = ['spew', 'unspew', 'format_hub_listeners', 'format_hub_timers',
'hub_listener_stacks', 'hub_exceptions', 'tpool_exceptions',
'hub_prevent_multiple_readers', 'hub_timer_stacks',
'hub_blocking_detection', 'format_asyncio_info',
'format_threads_info']
_token_splitter = re.compile(r'\W+')
class Spew:
def __init__(self, trace_names=None, show_values=True):
self.trace_names = trace_names
self.show_values = show_values
def __call__(self, frame, event, arg):
if event == 'line':
lineno = frame.f_lineno
if '__file__' in frame.f_globals:
filename = frame.f_globals['__file__']
if (filename.endswith('.pyc') or
filename.endswith('.pyo')):
filename = filename[:-1]
name = frame.f_globals['__name__']
line = linecache.getline(filename, lineno)
else:
name = '[unknown]'
                try:
                    # getsourcelines() returns (lines, first_lineno); index
                    # relative to where the code object starts.
                    lines, firstline = inspect.getsourcelines(frame)
                    line = lines[lineno - (firstline or 1)]
                except (OSError, IndexError):
                    line = 'Unknown code named [%s]. VM instruction #%d' % (
                        frame.f_code.co_name, frame.f_lasti)
if self.trace_names is None or name in self.trace_names:
print('%s:%s: %s' % (name, lineno, line.rstrip()))
if not self.show_values:
return self
details = []
tokens = _token_splitter.split(line)
for tok in tokens:
if tok in frame.f_globals:
details.append('%s=%r' % (tok, frame.f_globals[tok]))
if tok in frame.f_locals:
details.append('%s=%r' % (tok, frame.f_locals[tok]))
if details:
print("\t%s" % ' '.join(details))
return self
def spew(trace_names=None, show_values=False):
"""Install a trace hook which writes incredibly detailed logs
about what code is being executed to stdout.
"""
sys.settrace(Spew(trace_names, show_values))
def unspew():
"""Remove the trace hook installed by spew.
"""
sys.settrace(None)
def format_hub_listeners():
""" Returns a formatted string of the current listeners on the current
hub. This can be useful in determining what's going on in the event system,
especially when used in conjunction with :func:`hub_listener_stacks`.
"""
from eventlet import hubs
hub = hubs.get_hub()
result = ['READERS:']
for l in hub.get_readers():
result.append(repr(l))
result.append('WRITERS:')
for l in hub.get_writers():
result.append(repr(l))
return os.linesep.join(result)
def format_asyncio_info():
""" Returns a formatted string of the asyncio info.
This can be useful in determining what's going on in the asyncio event
loop system, especially when used in conjunction with the asyncio hub.
"""
import asyncio
tasks = asyncio.all_tasks()
result = ['TASKS:']
result.append(repr(tasks))
result.append(f'EVENTLOOP: {asyncio.events.get_event_loop()}')
return os.linesep.join(result)
def format_threads_info():
""" Returns a formatted string of the threads info.
This can be useful in determining what's going on with created threads,
especially when used in conjunction with greenlet
"""
import threading
threads = threading._active
result = ['THREADS:']
result.append(repr(threads))
return os.linesep.join(result)
def format_hub_timers():
""" Returns a formatted string of the current timers on the current
hub. This can be useful in determining what's going on in the event system,
especially when used in conjunction with :func:`hub_timer_stacks`.
"""
from eventlet import hubs
hub = hubs.get_hub()
result = ['TIMERS:']
for l in hub.timers:
result.append(repr(l))
return os.linesep.join(result)
def hub_listener_stacks(state=False):
"""Toggles whether or not the hub records the stack when clients register
listeners on file descriptors. This can be useful when trying to figure
out what the hub is up to at any given moment. To inspect the stacks
of the current listeners, call :func:`format_hub_listeners` at critical
junctures in the application logic.
"""
from eventlet import hubs
hubs.get_hub().set_debug_listeners(state)
def hub_timer_stacks(state=False):
"""Toggles whether or not the hub records the stack when timers are set.
To inspect the stacks of the current timers, call :func:`format_hub_timers`
at critical junctures in the application logic.
"""
from eventlet.hubs import timer
timer._g_debug = state
def hub_prevent_multiple_readers(state=True):
"""Toggle prevention of multiple greenlets reading from a socket
When multiple greenlets read from the same socket it is often hard
to predict which greenlet will receive what data. To achieve
resource sharing consider using ``eventlet.pools.Pool`` instead.
    Note that this safeguard is a debugging convenience; it is not a feature
    meant to be baked into production code.
    **If you really know what you are doing** you can set the state to
    ``False`` to stop the hub from protecting against this mistake. Otherwise
    we strongly discourage disabling the protection; if you must, do so with
    great care.
    Be aware that disabling this prevention applies to your entire process,
    not only to the context where you find it useful, so it may have
    significant unexpected side effects: it can cause race conditions between
    your sockets and across your I/O in general.
    Also note that this debug convenience is not supported by the asyncio
    hub, which is the official plan for migrating off of eventlet; relying on
    it will lock your migration path.
"""
from eventlet.hubs import hub, get_hub
from eventlet.hubs import asyncio
if not state and isinstance(get_hub(), asyncio.Hub):
raise RuntimeError("Multiple readers are not yet supported by asyncio hub")
hub.g_prevent_multiple_readers = state
def hub_exceptions(state=True):
"""Toggles whether the hub prints exceptions that are raised from its
timers. This can be useful to see how greenthreads are terminating.
"""
from eventlet import hubs
hubs.get_hub().set_timer_exceptions(state)
from eventlet import greenpool
greenpool.DEBUG = state
def tpool_exceptions(state=False):
"""Toggles whether tpool itself prints exceptions that are raised from
functions that are executed in it, in addition to raising them like
it normally does."""
from eventlet import tpool
tpool.QUIET = not state
def hub_blocking_detection(state=False, resolution=1):
"""Toggles whether Eventlet makes an effort to detect blocking
behavior in an application.
    It does this by telling the kernel to raise a SIGALRM after a
short timeout, and clearing the timeout every time the hub
greenlet is resumed. Therefore, any code that runs for a long
time without yielding to the hub will get interrupted by the
blocking detector (don't use it in production!).
    The *resolution* argument governs how long the SIGALRM timer waits,
    in seconds. The implementation uses :func:`signal.setitimer`, so
    *resolution* may be specified as a floating-point value.
The shorter the resolution, the greater the chance of false
positives.
"""
from eventlet import hubs
assert resolution > 0
hubs.get_hub().debug_blocking = state
hubs.get_hub().debug_blocking_resolution = resolution
if not state:
hubs.get_hub().block_detect_post()
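
# A usage sketch (illustrative addition, not part of the upstream module): how
# these toggles are typically combined. Enable stack recording and blocking
# detection, run some work, then dump the formatted reports at a checkpoint.
# Blocking detection assumes a POSIX main thread, since it relies on SIGALRM.
if __name__ == '__main__':
    import eventlet
    hub_listener_stacks(True)
    hub_timer_stacks(True)
    hub_blocking_detection(True, resolution=0.5)
    eventlet.spawn(eventlet.sleep, 0.1).wait()
    print(format_hub_listeners())
    print(format_hub_timers())
    hub_blocking_detection(False)  # also clears any pending itimer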

@@ -0,0 +1,218 @@
from eventlet import hubs
from eventlet.support import greenlets as greenlet
__all__ = ['Event']
class NOT_USED:
def __repr__(self):
return 'NOT_USED'
NOT_USED = NOT_USED()
class Event:
"""An abstraction where an arbitrary number of coroutines
can wait for one event from another.
Events are similar to a Queue that can only hold one item, but differ
in two important ways:
1. calling :meth:`send` never unschedules the current greenthread
2. :meth:`send` can only be called once; create a new event to send again.
They are good for communicating results between coroutines, and
are the basis for how
:meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
is implemented.
>>> from eventlet import event
>>> import eventlet
>>> evt = event.Event()
>>> def baz(b):
... evt.send(b + 1)
...
>>> _ = eventlet.spawn_n(baz, 3)
>>> evt.wait()
4
"""
_result = None
_exc = None
def __init__(self):
self._waiters = set()
self.reset()
def __str__(self):
params = (self.__class__.__name__, hex(id(self)),
self._result, self._exc, len(self._waiters))
return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params
def reset(self):
# this is kind of a misfeature and doesn't work perfectly well,
# it's better to create a new event rather than reset an old one
# removing documentation so that we don't get new use cases for it
assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
self._result = NOT_USED
self._exc = None
def ready(self):
""" Return true if the :meth:`wait` call will return immediately.
Used to avoid waiting for things that might take a while to time out.
For example, you can put a bunch of events into a list, and then visit
them all repeatedly, calling :meth:`ready` until one returns ``True``,
and then you can :meth:`wait` on that one."""
return self._result is not NOT_USED
def has_exception(self):
return self._exc is not None
def has_result(self):
return self._result is not NOT_USED and self._exc is None
def poll(self, notready=None):
if self.ready():
return self.wait()
return notready
# QQQ make it return tuple (type, value, tb) instead of raising
# because
# 1) "poll" does not imply raising
# 2) it's better not to screw up caller's sys.exc_info() by default
    # (e.g. if caller wants to call the function in except or finally)
def poll_exception(self, notready=None):
if self.has_exception():
return self.wait()
return notready
def poll_result(self, notready=None):
if self.has_result():
return self.wait()
return notready
def wait(self, timeout=None):
"""Wait until another coroutine calls :meth:`send`.
Returns the value the other coroutine passed to :meth:`send`.
>>> import eventlet
>>> evt = eventlet.Event()
>>> def wait_on():
... retval = evt.wait()
... print("waited for {0}".format(retval))
>>> _ = eventlet.spawn(wait_on)
>>> evt.send('result')
>>> eventlet.sleep(0)
waited for result
Returns immediately if the event has already occurred.
>>> evt.wait()
'result'
When the timeout argument is present and not None, it should be a floating point number
specifying a timeout for the operation in seconds (or fractions thereof).
"""
current = greenlet.getcurrent()
if self._result is NOT_USED:
hub = hubs.get_hub()
self._waiters.add(current)
timer = None
if timeout is not None:
timer = hub.schedule_call_local(timeout, self._do_send, None, None, current)
try:
result = hub.switch()
if timer is not None:
timer.cancel()
return result
finally:
self._waiters.discard(current)
if self._exc is not None:
current.throw(*self._exc)
return self._result
def send(self, result=None, exc=None):
"""Makes arrangements for the waiters to be woken with the
result and then returns immediately to the parent.
>>> from eventlet import event
>>> import eventlet
>>> evt = event.Event()
>>> def waiter():
... print('about to wait')
... result = evt.wait()
... print('waited for {0}'.format(result))
>>> _ = eventlet.spawn(waiter)
>>> eventlet.sleep(0)
about to wait
>>> evt.send('a')
>>> eventlet.sleep(0)
waited for a
It is an error to call :meth:`send` multiple times on the same event.
>>> evt.send('whoops') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AssertionError: Trying to re-send() an already-triggered event.
Use :meth:`reset` between :meth:`send` s to reuse an event object.
"""
assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
self._result = result
if exc is not None and not isinstance(exc, tuple):
exc = (exc, )
self._exc = exc
hub = hubs.get_hub()
for waiter in self._waiters:
hub.schedule_call_global(
0, self._do_send, self._result, self._exc, waiter)
def _do_send(self, result, exc, waiter):
if waiter in self._waiters:
if exc is None:
waiter.switch(result)
else:
waiter.throw(*exc)
def send_exception(self, *args):
"""Same as :meth:`send`, but sends an exception to waiters.
The arguments to send_exception are the same as the arguments
to ``raise``. If a single exception object is passed in, it
will be re-raised when :meth:`wait` is called, generating a
new stacktrace.
>>> from eventlet import event
>>> evt = event.Event()
>>> evt.send_exception(RuntimeError())
>>> evt.wait()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "eventlet/event.py", line 120, in wait
current.throw(*self._exc)
RuntimeError
If it's important to preserve the entire original stack trace,
you must pass in the entire :func:`sys.exc_info` tuple.
>>> import sys
>>> evt = event.Event()
>>> try:
... raise RuntimeError()
... except RuntimeError:
... evt.send_exception(*sys.exc_info())
...
>>> evt.wait()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "eventlet/event.py", line 120, in wait
current.throw(*self._exc)
File "<stdin>", line 2, in <module>
RuntimeError
Note that doing so stores a traceback object directly on the
Event object, which may cause reference cycles. See the
:func:`sys.exc_info` documentation.
"""
        # the arguments are the same as for greenlet.throw
return self.send(None, args)
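
# A usage sketch (illustrative addition): one greenthread waits while another
# delivers a result, then an exception, each through its own one-shot Event.
if __name__ == '__main__':
    import eventlet
    evt = Event()
    eventlet.spawn_n(evt.send, 42)       # send() never blocks the sender
    print(evt.wait())                    # -> 42
    evt2 = Event()
    eventlet.spawn_n(evt2.send_exception, RuntimeError('boom'))
    try:
        evt2.wait()
    except RuntimeError as exc:
        print(exc)                       # -> boom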

@@ -0,0 +1,15 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import SocketServer
patcher.inject(
'http.server',
globals(),
('socket', socket),
('SocketServer', SocketServer),
('socketserver', SocketServer))
del patcher
if __name__ == '__main__':
test()

@@ -0,0 +1,17 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import SimpleHTTPServer
from eventlet.green import urllib
from eventlet.green import select
test = None # bind prior to patcher.inject to silence pyflakes warning below
patcher.inject(
'http.server',
globals(),
('urllib', urllib),
('select', select))
del patcher
if __name__ == '__main__':
test() # pyflakes false alarm here unless test = None above

@@ -0,0 +1,40 @@
__MySQLdb = __import__('MySQLdb')
__all__ = __MySQLdb.__all__
__patched__ = ["connect", "Connect", 'Connection', 'connections']
from eventlet.patcher import slurp_properties
slurp_properties(
__MySQLdb, globals(),
ignore=__patched__, srckeys=dir(__MySQLdb))
from eventlet import tpool
__orig_connections = __import__('MySQLdb.connections').connections
def Connection(*args, **kw):
conn = tpool.execute(__orig_connections.Connection, *args, **kw)
return tpool.Proxy(conn, autowrap_names=('cursor',))
connect = Connect = Connection
# replicate the MySQLdb.connections module but with a tpooled Connection factory
class MySQLdbConnectionsModule:
pass
connections = MySQLdbConnectionsModule()
for var in dir(__orig_connections):
if not var.startswith('__'):
setattr(connections, var, getattr(__orig_connections, var))
connections.Connection = Connection
cursors = __import__('MySQLdb.cursors').cursors
converters = __import__('MySQLdb.converters').converters
# TODO support instantiating cursors.FooCursor objects directly
# TODO though this is a low priority, it would be nice if we supported
# subclassing eventlet.green.MySQLdb.connections.Connection
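
# A sketch of the pattern this module relies on (assumes MySQLdb is importable,
# since this module imports it above): tpool.execute() runs a blocking call in
# a real OS thread, so other greenthreads keep running in the meantime.
if __name__ == '__main__':
    import time
    import eventlet

    def _blocking_query():
        time.sleep(0.2)        # stands in for a blocking MySQLdb C call
        return 'row'

    gt = eventlet.spawn(tpool.execute, _blocking_query)
    eventlet.sleep(0)          # other greenthreads are still scheduled
    print(gt.wait())           # -> 'row'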

@@ -0,0 +1,125 @@
from OpenSSL import SSL as orig_SSL
from OpenSSL.SSL import *
from eventlet.support import get_errno
from eventlet import greenio
from eventlet.hubs import trampoline
import socket
class GreenConnection(greenio.GreenSocket):
""" Nonblocking wrapper for SSL.Connection objects.
"""
def __init__(self, ctx, sock=None):
if sock is not None:
fd = orig_SSL.Connection(ctx, sock)
else:
# if we're given a Connection object directly, use it;
# this is used in the inherited accept() method
fd = ctx
super(ConnectionType, self).__init__(fd)
def do_handshake(self):
""" Perform an SSL handshake (usually called after renegotiate or one of
set_accept_state or set_accept_state). This can raise the same exceptions as
send and recv. """
if self.act_non_blocking:
return self.fd.do_handshake()
while True:
try:
return self.fd.do_handshake()
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
def dup(self):
raise NotImplementedError("Dup not supported on SSL sockets")
def makefile(self, mode='r', bufsize=-1):
raise NotImplementedError("Makefile not supported on SSL sockets")
def read(self, size):
"""Works like a blocking call to SSL_read(), whose behavior is
described here: http://www.openssl.org/docs/ssl/SSL_read.html"""
if self.act_non_blocking:
return self.fd.read(size)
while True:
try:
return self.fd.read(size)
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except SysCallError as e:
if get_errno(e) == -1 or get_errno(e) > 0:
return ''
recv = read
def write(self, data):
"""Works like a blocking call to SSL_write(), whose behavior is
described here: http://www.openssl.org/docs/ssl/SSL_write.html"""
if not data:
return 0 # calling SSL_write() with 0 bytes to be sent is undefined
if self.act_non_blocking:
return self.fd.write(data)
while True:
try:
return self.fd.write(data)
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
send = write
def sendall(self, data):
"""Send "all" data on the connection. This calls send() repeatedly until
all data is sent. If an error occurs, it's impossible to tell how much data
has been sent.
No return value."""
tail = self.send(data)
while tail < len(data):
tail += self.send(data[tail:])
def shutdown(self):
if self.act_non_blocking:
return self.fd.shutdown()
while True:
try:
return self.fd.shutdown()
except WantReadError:
trampoline(self.fd.fileno(),
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
except WantWriteError:
trampoline(self.fd.fileno(),
write=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout)
Connection = ConnectionType = GreenConnection
del greenio

@@ -0,0 +1,9 @@
from . import crypto
from . import SSL
try:
# pyopenssl tsafe module was deprecated and removed in v20.0.0
# https://github.com/pyca/pyopenssl/pull/913
from . import tsafe
except ImportError:
pass
from .version import __version__

@@ -0,0 +1 @@
from OpenSSL.crypto import *

@@ -0,0 +1 @@
from OpenSSL.tsafe import *

@@ -0,0 +1 @@
from OpenSSL.version import __version__, __doc__

@@ -0,0 +1,33 @@
from eventlet import queue
__all__ = ['Empty', 'Full', 'LifoQueue', 'PriorityQueue', 'Queue']
__patched__ = ['LifoQueue', 'PriorityQueue', 'Queue']
# these classes exist to paper over the major operational difference between
# eventlet.queue.Queue and the stdlib equivalents
class Queue(queue.Queue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super().__init__(maxsize)
class PriorityQueue(queue.PriorityQueue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super().__init__(maxsize)
class LifoQueue(queue.LifoQueue):
def __init__(self, maxsize=0):
if maxsize == 0:
maxsize = None
super().__init__(maxsize)
Empty = queue.Empty
Full = queue.Full
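
# A short sketch of why this shim exists: eventlet treats maxsize=0 as a
# zero-capacity channel (put() blocks until a getter arrives), while the
# stdlib treats 0 as unbounded; the subclasses above restore the stdlib
# meaning by translating 0 to None.
if __name__ == '__main__':
    q = Queue(maxsize=0)       # unbounded, stdlib-style
    q.put(1)
    q.put(2)                   # does not block
    print(q.get(), q.get())    # -> 1 2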

@@ -0,0 +1,13 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import urllib
patcher.inject(
'http.server',
globals(),
('urllib', urllib))
del patcher
if __name__ == '__main__':
test()

@@ -0,0 +1,14 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import select
from eventlet.green import threading
patcher.inject(
'socketserver',
globals(),
('socket', socket),
('select', select),
('threading', threading))
# QQQ ForkingMixIn should be fixed to use green waitpid?

@@ -0,0 +1 @@
# this package contains modules from the standard library converted to use eventlet

@@ -0,0 +1,33 @@
__socket = __import__('socket')
__all__ = __socket.__all__
__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket', 'timeout']
import eventlet.patcher
eventlet.patcher.slurp_properties(__socket, globals(), ignore=__patched__, srckeys=dir(__socket))
os = __import__('os')
import sys
from eventlet import greenio
socket = greenio.GreenSocket
_GLOBAL_DEFAULT_TIMEOUT = greenio._GLOBAL_DEFAULT_TIMEOUT
timeout = greenio.socket_timeout
try:
__original_fromfd__ = __socket.fromfd
def fromfd(*args):
return socket(__original_fromfd__(*args))
except AttributeError:
pass
try:
__original_socketpair__ = __socket.socketpair
def socketpair(*args):
one, two = __original_socketpair__(*args)
return socket(one), socket(two)
except AttributeError:
pass
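
# A usage sketch (POSIX only, since it relies on socketpair()): the wrappers
# above return GreenSockets, so a blocking recv() cooperatively yields to
# other greenthreads through the hub instead of stalling the process.
if __name__ == '__main__':
    import eventlet
    a, b = socketpair()
    eventlet.spawn_n(a.sendall, b'hi')
    print(b.recv(2))           # yields until the writer has run -> b'hi'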

@@ -0,0 +1,14 @@
import sys
if sys.version_info < (3, 12):
from eventlet import patcher
from eventlet.green import asyncore
from eventlet.green import socket
patcher.inject(
'asynchat',
globals(),
('asyncore', asyncore),
('socket', socket))
del patcher

@@ -0,0 +1,16 @@
import sys
if sys.version_info < (3, 12):
from eventlet import patcher
from eventlet.green import select
from eventlet.green import socket
from eventlet.green import time
patcher.inject(
"asyncore",
globals(),
('select', select),
('socket', socket),
('time', time))
del patcher

@@ -0,0 +1,38 @@
"""
In order to detect a filehandle that's been closed, our only clue may be
the operating system returning the same filehandle in response to some
other operation.
The builtins 'file' and 'open' are patched to collaborate with the
notify_opened protocol.
"""
builtins_orig = __builtins__
from eventlet import hubs
from eventlet.hubs import hub
from eventlet.patcher import slurp_properties
import sys
__all__ = dir(builtins_orig)
__patched__ = ['open']
slurp_properties(builtins_orig, globals(),
ignore=__patched__, srckeys=dir(builtins_orig))
hubs.get_hub()
__original_open = open
__opening = False
def open(*args, **kwargs):
global __opening
result = __original_open(*args, **kwargs)
if not __opening:
# This is incredibly ugly. 'open' is used under the hood by
# the import process. So, ensure we don't wind up in an
# infinite loop.
__opening = True
hubs.notify_opened(result.fileno())
__opening = False
return result
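
# A tiny sketch of the effect: opening a file through this module also tells
# the hub about the new descriptor, so stale listeners on a recycled file
# descriptor number can be invalidated.
if __name__ == '__main__':
    f = open(__file__)         # the wrapper above; calls hubs.notify_opened
    print(f.fileno() >= 0)     # -> True
    f.close()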

@@ -0,0 +1,13 @@
from eventlet import patcher
# *NOTE: there might be some funny business with the "SOCKS" module
# if it even still exists
from eventlet.green import socket
patcher.inject('ftplib', globals(), ('socket', socket))
del patcher
# Run test program when run as a script
if __name__ == '__main__':
test()

@@ -0,0 +1,189 @@
# This is part of Python source code with Eventlet-specific modifications.
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved" are retained in Python alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
from enum import IntEnum
__all__ = ['HTTPStatus']
class HTTPStatus(IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
"""
def __new__(cls, value, phrase, description=''):
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase
obj.description = description
return obj
# informational
CONTINUE = 100, 'Continue', 'Request received, please continue'
SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
'Switching to new protocol; obey Upgrade header')
PROCESSING = 102, 'Processing'
# success
OK = 200, 'OK', 'Request fulfilled, document follows'
CREATED = 201, 'Created', 'Document created, URL follows'
ACCEPTED = (202, 'Accepted',
'Request accepted, processing continues off-line')
NON_AUTHORITATIVE_INFORMATION = (203,
'Non-Authoritative Information', 'Request fulfilled from cache')
NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
MULTI_STATUS = 207, 'Multi-Status'
ALREADY_REPORTED = 208, 'Already Reported'
IM_USED = 226, 'IM Used'
# redirection
MULTIPLE_CHOICES = (300, 'Multiple Choices',
'Object has several resources -- see URI list')
MOVED_PERMANENTLY = (301, 'Moved Permanently',
'Object moved permanently -- see URI list')
FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
NOT_MODIFIED = (304, 'Not Modified',
'Document has not changed since given time')
USE_PROXY = (305, 'Use Proxy',
'You must use proxy specified in Location to access this resource')
TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
'Object moved temporarily -- see URI list')
    PERMANENT_REDIRECT = (308, 'Permanent Redirect',
        'Object moved permanently -- see URI list')
# client error
BAD_REQUEST = (400, 'Bad Request',
'Bad request syntax or unsupported method')
UNAUTHORIZED = (401, 'Unauthorized',
'No permission -- see authorization schemes')
PAYMENT_REQUIRED = (402, 'Payment Required',
'No payment -- see charging schemes')
FORBIDDEN = (403, 'Forbidden',
'Request forbidden -- authorization will not help')
NOT_FOUND = (404, 'Not Found',
'Nothing matches the given URI')
METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
'Specified method is invalid for this resource')
NOT_ACCEPTABLE = (406, 'Not Acceptable',
'URI not available in preferred format')
PROXY_AUTHENTICATION_REQUIRED = (407,
'Proxy Authentication Required',
'You must authenticate with this proxy before proceeding')
REQUEST_TIMEOUT = (408, 'Request Timeout',
'Request timed out; try again later')
CONFLICT = 409, 'Conflict', 'Request conflict'
GONE = (410, 'Gone',
'URI no longer exists and has been permanently removed')
LENGTH_REQUIRED = (411, 'Length Required',
'Client must specify Content-Length')
PRECONDITION_FAILED = (412, 'Precondition Failed',
'Precondition in headers is false')
REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
'Entity is too large')
REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
'URI is too long')
UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
'Entity body in unsupported format')
REQUESTED_RANGE_NOT_SATISFIABLE = (416,
'Requested Range Not Satisfiable',
'Cannot satisfy request range')
EXPECTATION_FAILED = (417, 'Expectation Failed',
'Expect condition could not be satisfied')
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = (428, 'Precondition Required',
'The origin server requires the request to be conditional')
TOO_MANY_REQUESTS = (429, 'Too Many Requests',
'The user has sent too many requests in '
'a given amount of time ("rate limiting")')
REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
'Request Header Fields Too Large',
'The server is unwilling to process the request because its header '
'fields are too large')
# server errors
INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
'Server got itself in trouble')
NOT_IMPLEMENTED = (501, 'Not Implemented',
'Server does not support this operation')
BAD_GATEWAY = (502, 'Bad Gateway',
'Invalid responses from another server/proxy')
SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
'The server cannot process the request due to a high load')
GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
'The gateway server did not receive a timely response')
HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
'Cannot fulfill request')
VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
LOOP_DETECTED = 508, 'Loop Detected'
NOT_EXTENDED = 510, 'Not Extended'
NETWORK_AUTHENTICATION_REQUIRED = (511,
'Network Authentication Required',
'The client needs to authenticate to gain network access')
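
# A quick sketch of the IntEnum pattern used above: each member is a plain
# int that also carries a phrase and a description.
if __name__ == '__main__':
    s = HTTPStatus(404)
    print(int(s), s.phrase, s.description)
    # -> 404 Not Found Nothing matches the given URI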

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -0,0 +1,691 @@
# This is part of Python source code with Eventlet-specific modifications.
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved" are retained in Python alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
# by Timothy O'Malley <timo@alum.mit.edu>
#
# Cookie.py is a Python module for the handling of HTTP
# cookies as a Python dictionary. See RFC 2109 for more
# information on cookies.
#
# The original idea to treat Cookies as a dictionary came from
# Dave Mitchell (davem@magnet.com) in 1995, when he released the
# first version of nscookie.py.
#
####
r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.
The Basics
----------
Importing is easy...
>>> from http import cookies
Most of the time you start by creating a cookie.
>>> C = cookies.SimpleCookie()
Once you've created your Cookie, you can add values just as if it were
a dictionary.
>>> C = cookies.SimpleCookie()
>>> C["fig"] = "newton"
>>> C["sugar"] = "wafer"
>>> C.output()
'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header. This is the
default behavior. You can change the header and printed
attributes by using the .output() function
>>> C = cookies.SimpleCookie()
>>> C["rocky"] = "road"
>>> C["rocky"]["path"] = "/cookie"
>>> print(C.output(header="Cookie:"))
Cookie: rocky=road; Path=/cookie
>>> print(C.output(attrs=[], header="Cookie:"))
Cookie: rocky=road
The load() method of a Cookie extracts cookies from a string. In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.
>>> C = cookies.SimpleCookie()
>>> C.load("chips=ahoy; vienna=finger")
>>> C.output()
'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
The load() method is darn-tootin smart about identifying cookies
within a string. Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.
>>> C = cookies.SimpleCookie()
>>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
>>> print(C)
Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
Each element of the Cookie also supports all of the RFC 2109
Cookie attributes. Here's an example which sets the Path
attribute.
>>> C = cookies.SimpleCookie()
>>> C["oreo"] = "doublestuff"
>>> C["oreo"]["path"] = "/"
>>> print(C)
Set-Cookie: oreo=doublestuff; Path=/
Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.
>>> C = cookies.SimpleCookie()
>>> C["twix"] = "none for you"
>>> C["twix"].value
'none for you'
The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.
>>> C = cookies.SimpleCookie()
>>> C["number"] = 7
>>> C["string"] = "seven"
>>> C["number"].value
'7'
>>> C["string"].value
'seven'
>>> C.output()
'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
Finis.
"""
#
# Import our required modules
#
import re
import string
__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join
def _warn_deprecated_setter(setter):
import warnings
msg = ('The .%s setter is deprecated. The attribute will be read-only in '
'future releases. Please use the set() method instead.' % setter)
warnings.warn(msg, DeprecationWarning, stacklevel=3)
#
# Define an exception visible to External modules
#
class CookieError(Exception):
pass
# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068. They provide
# a two-way quoting algorithm. Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character. Any '\' or '"' is
# quoted with a preceding '\' slash.
# Because of the way browsers really handle cookies (as opposed to what
# the RFC says) we also encode "," and ";".
#
# These are taken from RFC2068 and RFC2109.
# _LegalChars is the list of chars which don't require "'s
# _Translator hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_UnescapedChars = _LegalChars + ' ()/<=>?@[]{}'
_Translator = {n: '\\%03o' % n
for n in set(range(256)) - set(map(ord, _UnescapedChars))}
_Translator.update({
ord('"'): '\\"',
ord('\\'): '\\\\',
})
# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
_is_legal_key = re.compile(r'[%s]+\Z' % re.escape(_LegalChars)).match
def _quote(str):
r"""Quote a string for use in a cookie header.
If the string does not need to be double-quoted, then just return the
string. Otherwise, surround the string in doublequotes and quote
(with a \) special characters.
"""
if str is None or _is_legal_key(str):
return str
else:
return '"' + str.translate(_Translator) + '"'
_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")
def _unquote(str):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if str is None or len(str) < 2:
return str
if str[0] != '"' or str[-1] != '"':
return str
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
str = str[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(str)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(str, i)
q_match = _QuotePatt.search(str, i)
if not o_match and not q_match: # Neither matched
res.append(str[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(str[i:k])
res.append(str[k+1])
i = k + 2
else: # OctalPatt matched
res.append(str[i:j])
res.append(chr(int(str[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res)
# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header. By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header. The one optional argument is an
# offset from now, in seconds. For example, an offset of -3600 means "one hour
# ago". The offset may be a floating point number.
#
_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
_monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
from eventlet.green.time import gmtime, time
now = time()
year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
(weekdayname[wd], day, monthname[month], year, hh, mm, ss)
class Morsel(dict):
"""A class to hold ONE (key, value) pair.
In a cookie, each such pair may have several attributes, so this class is
used to keep the attributes associated with the appropriate key,value pair.
This class also includes a coded_value attribute, which is used to hold
the network representation of the value. This is most useful when Python
objects are pickled for network transit.
"""
# RFC 2109 lists these attributes as reserved:
# path comment domain
# max-age secure version
#
# For historical reasons, these attributes are also reserved:
# expires
#
# This is an extension from Microsoft:
# httponly
#
# This dictionary provides a mapping from the lowercase
# variant on the left to the appropriate traditional
# formatting on the right.
_reserved = {
"expires" : "expires",
"path" : "Path",
"comment" : "Comment",
"domain" : "Domain",
"max-age" : "Max-Age",
"secure" : "Secure",
"httponly" : "HttpOnly",
"version" : "Version",
}
_flags = {'secure', 'httponly'}
def __init__(self):
# Set defaults
self._key = self._value = self._coded_value = None
# Set default attributes
for key in self._reserved:
dict.__setitem__(self, key, "")
@property
def key(self):
return self._key
@key.setter
def key(self, key):
_warn_deprecated_setter('key')
self._key = key
@property
def value(self):
return self._value
@value.setter
def value(self, value):
_warn_deprecated_setter('value')
self._value = value
@property
def coded_value(self):
return self._coded_value
@coded_value.setter
def coded_value(self, coded_value):
_warn_deprecated_setter('coded_value')
self._coded_value = coded_value
def __setitem__(self, K, V):
K = K.lower()
        if K not in self._reserved:
raise CookieError("Invalid attribute %r" % (K,))
dict.__setitem__(self, K, V)
def setdefault(self, key, val=None):
key = key.lower()
if key not in self._reserved:
raise CookieError("Invalid attribute %r" % (key,))
return dict.setdefault(self, key, val)
def __eq__(self, morsel):
if not isinstance(morsel, Morsel):
return NotImplemented
return (dict.__eq__(self, morsel) and
self._value == morsel._value and
self._key == morsel._key and
self._coded_value == morsel._coded_value)
__ne__ = object.__ne__
def copy(self):
morsel = Morsel()
dict.update(morsel, self)
morsel.__dict__.update(self.__dict__)
return morsel
def update(self, values):
data = {}
for key, val in dict(values).items():
key = key.lower()
if key not in self._reserved:
raise CookieError("Invalid attribute %r" % (key,))
data[key] = val
dict.update(self, data)
def isReservedKey(self, K):
return K.lower() in self._reserved
def set(self, key, val, coded_val, LegalChars=_LegalChars):
if LegalChars != _LegalChars:
import warnings
warnings.warn(
'LegalChars parameter is deprecated, ignored and will '
'be removed in future versions.', DeprecationWarning,
stacklevel=2)
if key.lower() in self._reserved:
raise CookieError('Attempt to set a reserved key %r' % (key,))
if not _is_legal_key(key):
raise CookieError('Illegal key %r' % (key,))
# It's a good key, so save it.
self._key = key
self._value = val
self._coded_value = coded_val
def __getstate__(self):
return {
'key': self._key,
'value': self._value,
'coded_value': self._coded_value,
}
def __setstate__(self, state):
self._key = state['key']
self._value = state['value']
self._coded_value = state['coded_value']
def output(self, attrs=None, header="Set-Cookie:"):
return "%s %s" % (header, self.OutputString(attrs))
__str__ = output
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.OutputString())
def js_output(self, attrs=None):
# Print javascript
return """
<script type="text/javascript">
<!-- begin hiding
document.cookie = \"%s\";
// end hiding -->
</script>
""" % (self.OutputString(attrs).replace('"', r'\"'))
def OutputString(self, attrs=None):
# Build up our result
#
result = []
append = result.append
# First, the key=value pair
append("%s=%s" % (self.key, self.coded_value))
# Now add any defined attributes
if attrs is None:
attrs = self._reserved
items = sorted(self.items())
for key, value in items:
if value == "":
continue
if key not in attrs:
continue
if key == "expires" and isinstance(value, int):
append("%s=%s" % (self._reserved[key], _getdate(value)))
elif key == "max-age" and isinstance(value, int):
append("%s=%d" % (self._reserved[key], value))
elif key in self._flags:
if value:
append(str(self._reserved[key]))
else:
append("%s=%s" % (self._reserved[key], value))
# Return the result
return _semispacejoin(result)
#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#
_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
_LegalValueChars = _LegalKeyChars + r'\[\]'
_CookiePattern = re.compile(r"""
(?x) # This is a verbose pattern
\s* # Optional whitespace at start of cookie
(?P<key> # Start of group 'key'
[""" + _LegalKeyChars + r"""]+? # Any word of at least one letter
) # End of group 'key'
( # Optional group: there may not be a value.
\s*=\s* # Equal Sign
(?P<val> # Start of group 'val'
"(?:[^\\"]|\\.)*" # Any doublequoted string
| # or
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
| # or
[""" + _LegalValueChars + r"""]* # Any word or empty string
) # End of group 'val'
)? # End of optional value group
\s* # Any number of spaces.
(\s+|;|$) # Ending either at space, semicolon, or EOS.
""", re.ASCII) # May be removed if safe.
# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
class BaseCookie(dict):
"""A container class for a set of Morsels."""
def value_decode(self, val):
"""real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie's value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.
"""
return val, val
def value_encode(self, val):
"""real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie's value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.
"""
strval = str(val)
return strval, strval
def __init__(self, input=None):
if input:
self.load(input)
def __set(self, key, real_value, coded_value):
"""Private method for setting a cookie's value"""
M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
def __setitem__(self, key, value):
"""Dictionary style assignment."""
if isinstance(value, Morsel):
# allow assignment of constructed Morsels (e.g. for pickling)
dict.__setitem__(self, key, value)
else:
rval, cval = self.value_encode(value)
self.__set(key, rval, cval)
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
"""Return a string suitable for HTTP."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.output(attrs, header))
return sep.join(result)
__str__ = output
def __repr__(self):
l = []
items = sorted(self.items())
for key, value in items:
l.append('%s=%s' % (key, repr(value.value)))
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
def js_output(self, attrs=None):
"""Return a string suitable for JavaScript."""
result = []
items = sorted(self.items())
for key, value in items:
result.append(value.js_output(attrs))
return _nulljoin(result)
def load(self, rawdata):
"""Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary 'd'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())
"""
if isinstance(rawdata, str):
self.__parse_string(rawdata)
else:
# self.update() wouldn't call our custom __setitem__
for key, value in rawdata.items():
self[key] = value
return
def __parse_string(self, str, patt=_CookiePattern):
i = 0 # Our starting point
n = len(str) # Length of string
parsed_items = [] # Parsed (type, key, value) triples
morsel_seen = False # A key=value pair was previously encountered
TYPE_ATTRIBUTE = 1
TYPE_KEYVALUE = 2
# We first parse the whole cookie string and reject it if it's
# syntactically invalid (this helps avoid some classes of injection
# attacks).
while 0 <= i < n:
# Start looking for a cookie
match = patt.match(str, i)
if not match:
# No more cookies
break
key, value = match.group("key"), match.group("val")
i = match.end(0)
if key[0] == "$":
if not morsel_seen:
# We ignore attributes which pertain to the cookie
# mechanism as a whole, such as "$Version".
# See RFC 2965. (Does anyone care?)
continue
parsed_items.append((TYPE_ATTRIBUTE, key[1:], value))
elif key.lower() in Morsel._reserved:
if not morsel_seen:
# Invalid cookie string
return
if value is None:
if key.lower() in Morsel._flags:
parsed_items.append((TYPE_ATTRIBUTE, key, True))
else:
# Invalid cookie string
return
else:
parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value)))
elif value is not None:
parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value)))
morsel_seen = True
else:
# Invalid cookie string
return
# The cookie string is valid, apply it.
M = None # current morsel
for tp, key, value in parsed_items:
if tp == TYPE_ATTRIBUTE:
assert M is not None
M[key] = value
else:
assert tp == TYPE_KEYVALUE
rval, cval = value
self.__set(key, rval, cval)
M = self[key]
class SimpleCookie(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)

File diff suppressed because it is too large

@@ -0,0 +1,18 @@
from eventlet import patcher
from eventlet.green import socket
to_patch = [('socket', socket)]
try:
from eventlet.green import ssl
to_patch.append(('ssl', ssl))
except ImportError:
pass
from eventlet.green.http import client
for name in dir(client):
if name not in patcher.__exclude:
globals()[name] = getattr(client, name)
if __name__ == '__main__':
test()

@@ -0,0 +1,133 @@
os_orig = __import__("os")
import errno
socket = __import__("socket")
from stat import S_ISREG
from eventlet import greenio
from eventlet.support import get_errno
from eventlet import greenthread
from eventlet import hubs
from eventlet.patcher import slurp_properties
__all__ = os_orig.__all__
__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']
slurp_properties(
os_orig,
globals(),
ignore=__patched__,
srckeys=dir(os_orig))
def fdopen(fd, *args, **kw):
"""fdopen(fd [, mode='r' [, bufsize]]) -> file_object
Return an open file object connected to a file descriptor."""
if not isinstance(fd, int):
raise TypeError('fd should be int, not %r' % fd)
try:
return greenio.GreenPipe(fd, *args, **kw)
except OSError as e:
raise OSError(*e.args)
__original_read__ = os_orig.read
def read(fd, n):
"""read(fd, buffersize) -> string
Read a file descriptor."""
while True:
# don't wait to read for regular files
# select/poll will always return True while epoll will simply crash
st_mode = os_orig.stat(fd).st_mode
if not S_ISREG(st_mode):
try:
hubs.trampoline(fd, read=True)
except hubs.IOClosed:
return ''
try:
return __original_read__(fd, n)
except OSError as e:
if get_errno(e) == errno.EPIPE:
return ''
if get_errno(e) != errno.EAGAIN:
raise
__original_write__ = os_orig.write
def write(fd, st):
"""write(fd, string) -> byteswritten
Write a string to a file descriptor.
"""
while True:
# don't wait to write for regular files
# select/poll will always return True while epoll will simply crash
st_mode = os_orig.stat(fd).st_mode
if not S_ISREG(st_mode):
try:
hubs.trampoline(fd, write=True)
except hubs.IOClosed:
return 0
try:
return __original_write__(fd, st)
except OSError as e:
if get_errno(e) not in [errno.EAGAIN, errno.EPIPE]:
raise
def wait():
"""wait() -> (pid, status)
Wait for completion of a child process."""
return waitpid(0, 0)
__original_waitpid__ = os_orig.waitpid
def waitpid(pid, options):
"""waitpid(...)
waitpid(pid, options) -> (pid, status)
Wait for completion of a given child process."""
if options & os_orig.WNOHANG != 0:
return __original_waitpid__(pid, options)
else:
new_options = options | os_orig.WNOHANG
while True:
rpid, status = __original_waitpid__(pid, new_options)
if rpid and status >= 0:
return rpid, status
greenthread.sleep(0.01)
__original_open__ = os_orig.open
def open(file, flags, mode=0o777, dir_fd=None):
""" Wrap os.open
This behaves identically, but collaborates with
the hub's notify_opened protocol.
"""
# pathlib workaround #534 pathlib._NormalAccessor wraps `open` in
# `staticmethod` for py < 3.7 but not 3.7. That means we get here with
# `file` being a pathlib._NormalAccessor object, and the other arguments
# shifted. Fortunately pathlib doesn't use the `dir_fd` argument, so we
# have space in the parameter list. We use some heuristics to detect this
# and adjust the parameters (without importing pathlib)
if type(file).__name__ == '_NormalAccessor':
file, flags, mode, dir_fd = flags, mode, dir_fd, None
if dir_fd is not None:
fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
else:
fd = __original_open__(file, flags, mode)
hubs.notify_opened(fd)
return fd
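
# A usage sketch (POSIX pipes): the patched read()/write() trampoline on the
# hub for non-regular files, so a blocked reader yields to other greenthreads
# instead of blocking the whole process.
if __name__ == '__main__':
    import eventlet
    r, w = pipe()                        # pipe() is slurped from the real os
    eventlet.spawn_n(write, w, b'ping')  # green write from another greenthread
    print(read(r, 4))                    # green read; waits cooperatively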

@@ -0,0 +1,257 @@
# Copyright (c) 2010, CCP Games
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of CCP Games nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module is API-equivalent to the standard library :mod:`profile` module
lbut it is greenthread-aware as well as thread-aware. Use this module
to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
FIXME: No testcases for this module.
"""
profile_orig = __import__('profile')
__all__ = profile_orig.__all__
from eventlet.patcher import slurp_properties
slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))
import sys
import functools
from eventlet import greenthread
from eventlet import patcher
import _thread
thread = patcher.original(_thread.__name__) # non-monkeypatched module needed
# This class provides the start() and stop() functions
class Profile(profile_orig.Profile):
base = profile_orig.Profile
def __init__(self, timer=None, bias=None):
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.base.__init__(self, timer, bias)
self.sleeping = {}
def __call__(self, *args):
"""make callable, allowing an instance to be the profiler"""
self.dispatcher(*args)
def _setup(self):
self._has_setup = True
self.cur = None
self.timings = {}
self.current_tasklet = greenthread.getcurrent()
self.thread_id = thread.get_ident()
self.simulate_call("profiler")
def start(self, name="start"):
if getattr(self, "running", False):
return
self._setup()
self.simulate_call("start")
self.running = True
sys.setprofile(self.dispatcher)
def stop(self):
sys.setprofile(None)
self.running = False
self.TallyTimings()
    # special cases for the original run commands, making sure to
    # clear the timer context.
def runctx(self, cmd, globals, locals):
if not getattr(self, "_has_setup", False):
self._setup()
try:
return profile_orig.Profile.runctx(self, cmd, globals, locals)
finally:
self.TallyTimings()
def runcall(self, func, *args, **kw):
if not getattr(self, "_has_setup", False):
self._setup()
try:
return profile_orig.Profile.runcall(self, func, *args, **kw)
finally:
self.TallyTimings()
def trace_dispatch_return_extend_back(self, frame, t):
"""A hack function to override error checking in parent class. It
        allows invalid returns (where frames weren't previously entered into
        the profiler) which can happen for all the tasklets that suddenly start
        to get monitored. This means that the time will eventually be attributed
        to a call high in the chain, when there is a tasklet switch.
"""
if isinstance(self.cur[-2], Profile.fake_frame):
return False
self.trace_dispatch_call(frame, 0)
return self.trace_dispatch_return(frame, t)
def trace_dispatch_c_return_extend_back(self, frame, t):
# same for c return
if isinstance(self.cur[-2], Profile.fake_frame):
return False # ignore bogus returns
self.trace_dispatch_c_call(frame, 0)
return self.trace_dispatch_return(frame, t)
def SwitchTasklet(self, t0, t1, t):
# tally the time spent in the old tasklet
pt, it, et, fn, frame, rcur = self.cur
cur = (pt, it + t, et, fn, frame, rcur)
# we are switching to a new tasklet, store the old
self.sleeping[t0] = cur, self.timings
self.current_tasklet = t1
# find the new one
try:
self.cur, self.timings = self.sleeping.pop(t1)
except KeyError:
self.cur, self.timings = None, {}
self.simulate_call("profiler")
self.simulate_call("new_tasklet")
def TallyTimings(self):
oldtimings = self.sleeping
self.sleeping = {}
# first, unwind the main "cur"
self.cur = self.Unwind(self.cur, self.timings)
        # we must keep the timings dicts separate for each tasklet, since each
        # contains the 'ns' item, the recursion count of each function in that
        # tasklet. This is used in the Unwind method.
for tasklet, (cur, timings) in oldtimings.items():
self.Unwind(cur, timings)
for k, v in timings.items():
if k not in self.timings:
self.timings[k] = v
else:
# accumulate all to the self.timings
cc, ns, tt, ct, callers = self.timings[k]
# ns should be 0 after unwinding
cc += v[0]
tt += v[2]
ct += v[3]
for k1, v1 in v[4].items():
callers[k1] = callers.get(k1, 0) + v1
self.timings[k] = cc, ns, tt, ct, callers
    def Unwind(self, cur, timings):
        """A function to unwind a 'cur' frame and tally the results;
        see profile.trace_dispatch_return() for details."""
# also see simulate_cmd_complete()
while(cur[-1]):
rpt, rit, ret, rfn, frame, rcur = cur
frame_total = rit + ret
if rfn in timings:
cc, ns, tt, ct, callers = timings[rfn]
else:
cc, ns, tt, ct, callers = 0, 0, 0, 0, {}
if not ns:
ct = ct + frame_total
cc = cc + 1
if rcur:
ppt, pit, pet, pfn, pframe, pcur = rcur
else:
pfn = None
if pfn in callers:
callers[pfn] = callers[pfn] + 1 # hack: gather more
elif pfn:
callers[pfn] = 1
timings[rfn] = cc, ns - 1, tt + rit, ct, callers
ppt, pit, pet, pfn, pframe, pcur = rcur
rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
cur = rcur
return cur
def ContextWrap(f):
@functools.wraps(f)
def ContextWrapper(self, arg, t):
current = greenthread.getcurrent()
if current != self.current_tasklet:
self.SwitchTasklet(self.current_tasklet, current, t)
t = 0.0 # the time was billed to the previous tasklet
return f(self, arg, t)
return ContextWrapper
# Add "return safety" to the dispatchers
Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
'return': Profile.trace_dispatch_return_extend_back,
'c_return': Profile.trace_dispatch_c_return_extend_back,
})
# Add automatic tasklet detection to the callbacks.
Profile.dispatch = {k: ContextWrap(v) for k, v in Profile.dispatch.items()}
# run statements shamelessly stolen from profile.py
def run(statement, filename=None, sort=-1):
"""Run statement under profiler optionally saving results in filename
This function takes a single argument that can be passed to the
"exec" statement, and an optional file name. In all cases this
routine attempts to "exec" its first argument and gather profiling
statistics from the execution. If no file name is present, then this
function automatically prints a simple profiling report, sorted by the
standard name string (file/line/function-name) that is presented in
each line.
"""
prof = Profile()
try:
prof = prof.run(statement)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats(sort)
def runctx(statement, globals, locals, filename=None):
"""Run statement under profiler, supplying your own globals and locals,
optionally saving results in filename.
statement and filename have the same semantics as profile.run
"""
prof = Profile()
try:
prof = prof.runctx(statement, globals, locals)
except SystemExit:
pass
if filename is not None:
prof.dump_stats(filename)
else:
return prof.print_stats()
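
# A usage sketch of the start()/stop() API added above; print_stats() is
# inherited from the stdlib profile.Profile.
if __name__ == '__main__':
    import eventlet
    p = Profile()
    p.start()
    eventlet.spawn(eventlet.sleep, 0.01).wait()  # work crossing a greenthread switch
    p.stop()
    p.print_stats()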

@@ -0,0 +1,86 @@
import eventlet
from eventlet.hubs import get_hub
__select = eventlet.patcher.original('select')
error = __select.error
__patched__ = ['select']
__deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']
def get_fileno(obj):
# The purpose of this function is to exactly replicate
# the behavior of the select module when confronted with
# abnormal filenos; the details are extensively tested in
# the stdlib test/test_select.py.
try:
f = obj.fileno
except AttributeError:
if not isinstance(obj, int):
raise TypeError("Expected int or long, got %s" % type(obj))
return obj
else:
rv = f()
if not isinstance(rv, int):
raise TypeError("Expected int or long, got %s" % type(rv))
return rv
def select(read_list, write_list, error_list, timeout=None):
# error checking like this is required by the stdlib unit tests
if timeout is not None:
try:
timeout = float(timeout)
except ValueError:
raise TypeError("Expected number for timeout")
hub = get_hub()
timers = []
current = eventlet.getcurrent()
if hub.greenlet is current:
raise RuntimeError('do not call blocking functions from the mainloop')
ds = {}
for r in read_list:
ds[get_fileno(r)] = {'read': r}
for w in write_list:
ds.setdefault(get_fileno(w), {})['write'] = w
for e in error_list:
ds.setdefault(get_fileno(e), {})['error'] = e
listeners = []
def on_read(d):
original = ds[get_fileno(d)]['read']
current.switch(([original], [], []))
def on_write(d):
original = ds[get_fileno(d)]['write']
current.switch(([], [original], []))
def on_timeout2():
current.switch(([], [], []))
def on_timeout():
# ensure that BaseHub.run() has a chance to call self.wait()
# at least once before timed out. otherwise the following code
# can time out erroneously.
#
# s1, s2 = socket.socketpair()
# print(select.select([], [s1], [], 0))
timers.append(hub.schedule_call_global(0, on_timeout2))
if timeout is not None:
timers.append(hub.schedule_call_global(timeout, on_timeout))
try:
for k, v in ds.items():
if v.get('read'):
listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None))
if v.get('write'):
listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None))
try:
return hub.switch()
finally:
for l in listeners:
hub.remove(l)
finally:
for t in timers:
t.cancel()
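# Editor's note: a minimal usage sketch, not part of the original module.
# This is the exact situation described in on_timeout() above: even with a
# zero timeout, the hub gets one chance to poll, so a writable socket is
# reported instead of an erroneous timeout.
def _green_select_demo():
    import socket
    s1, s2 = socket.socketpair()
    try:
        return select([], [s1], [], 0)  # expected: ([], [s1], [])
    finally:
        s1.close()
        s2.close()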

View File

@ -0,0 +1,34 @@
import sys
from eventlet import patcher
from eventlet.green import select
__patched__ = [
'DefaultSelector',
'SelectSelector',
]
# We only have green select so the options are:
# * leave it be and have selectors that block
# * try to pretend the "bad" selectors don't exist
# * replace all with SelectSelector for the price of possibly different
#   performance characteristics and a missing fileno() method (if someone
#   uses it, it'll result in a crash; we may want to implement it in the future)
#
# This module used to follow the third approach, but just removing the
# offending selectors is a less error-prone and less confusing approach.
__deleted__ = [
'PollSelector',
'EpollSelector',
'DevpollSelector',
'KqueueSelector',
]
patcher.inject('selectors', globals(), ('select', select))
del patcher
if sys.platform != 'win32':
SelectSelector._select = staticmethod(select.select)
DefaultSelector = SelectSelector
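# Editor's note: a minimal usage sketch, not part of the original module
# (POSIX only, since it selects on a pipe). After the injection above,
# DefaultSelector is the green SelectSelector, so waiting on it yields to
# the hub instead of blocking the OS thread.
def _green_selector_demo():
    import os
    r, w = os.pipe()
    sel = DefaultSelector()
    try:
        sel.register(r, EVENT_READ)
        os.write(w, b'x')
        return sel.select(timeout=1)  # one ready (SelectorKey, events) pair
    finally:
        sel.close()
        os.close(r)
        os.close(w)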

View File

@ -0,0 +1,63 @@
import os
import sys
__import__('eventlet.green._socket_nodns')
__socket = sys.modules['eventlet.green._socket_nodns']
__all__ = __socket.__all__
__patched__ = __socket.__patched__ + [
'create_connection',
'getaddrinfo',
'gethostbyname',
'gethostbyname_ex',
'getnameinfo',
]
from eventlet.patcher import slurp_properties
slurp_properties(__socket, globals(), srckeys=dir(__socket))
if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes':
from eventlet.support import greendns
gethostbyname = greendns.gethostbyname
getaddrinfo = greendns.getaddrinfo
gethostbyname_ex = greendns.gethostbyname_ex
getnameinfo = greendns.getnameinfo
del greendns
def create_connection(address,
timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
err = "getaddrinfo returns an empty list"
host, port = address
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as e:
err = e
if sock is not None:
sock.close()
if not isinstance(err, error):
err = error(err)
raise err
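# Editor's note: a minimal usage sketch, not part of the original module;
# the host and port are placeholders. Name resolution goes through greendns
# (when enabled above) and the connect itself yields to the hub while waiting.
def _create_connection_demo(host='example.com', port=80):
    sock = create_connection((host, port), timeout=10)
    try:
        return sock.getpeername()
    finally:
        sock.close()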

View File

@ -0,0 +1,487 @@
__ssl = __import__('ssl')
from eventlet.patcher import slurp_properties
slurp_properties(__ssl, globals(), srckeys=dir(__ssl))
import sys
from eventlet import greenio, hubs
from eventlet.greenio import (
GreenSocket, CONNECT_ERR, CONNECT_SUCCESS,
)
from eventlet.hubs import trampoline, IOClosed
from eventlet.support import get_errno, PY33
from contextlib import contextmanager
orig_socket = __import__('socket')
socket = orig_socket.socket
timeout_exc = orig_socket.timeout
__patched__ = [
'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
'create_default_context', '_create_default_https_context']
_original_sslsocket = __ssl.SSLSocket
_original_sslcontext = __ssl.SSLContext
_is_py_3_7 = sys.version_info[:2] == (3, 7)
_original_wrap_socket = __ssl.SSLContext.wrap_socket
@contextmanager
def _original_ssl_context(*args, **kwargs):
tmp_sslcontext = _original_wrap_socket.__globals__.get('SSLContext', None)
tmp_sslsocket = _original_sslsocket._create.__globals__.get('SSLSocket', None)
_original_sslsocket._create.__globals__['SSLSocket'] = _original_sslsocket
_original_wrap_socket.__globals__['SSLContext'] = _original_sslcontext
try:
yield
finally:
_original_wrap_socket.__globals__['SSLContext'] = tmp_sslcontext
_original_sslsocket._create.__globals__['SSLSocket'] = tmp_sslsocket
class GreenSSLSocket(_original_sslsocket):
""" This is a green version of the SSLSocket class from the ssl module added
in 2.6. For documentation on it, please see the Python standard
documentation.
Python nonblocking ssl objects don't give errors when the other end
of the socket is closed (they do notice when the other end is shutdown,
though). Any write/read operations will simply hang if the socket is
closed from the other end. There is no obvious fix for this problem;
it appears to be a limitation of Python's ssl object implementation.
A workaround is to set a reasonable timeout on the socket using
settimeout(), and to close/reopen the connection when a timeout
occurs at an unexpected juncture in the code.
"""
def __new__(cls, sock=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_TLS, ca_certs=None,
do_handshake_on_connect=True, *args, **kw):
if not isinstance(sock, GreenSocket):
sock = GreenSocket(sock)
with _original_ssl_context():
context = kw.get('_context')
if context:
ret = _original_sslsocket._create(
sock=sock.fd,
server_side=server_side,
do_handshake_on_connect=False,
suppress_ragged_eofs=kw.get('suppress_ragged_eofs', True),
server_hostname=kw.get('server_hostname'),
context=context,
session=kw.get('session'),
)
else:
ret = cls._wrap_socket(
sock=sock.fd,
keyfile=keyfile,
certfile=certfile,
server_side=server_side,
cert_reqs=cert_reqs,
ssl_version=ssl_version,
ca_certs=ca_certs,
do_handshake_on_connect=False,
ciphers=kw.get('ciphers'),
)
ret.keyfile = keyfile
ret.certfile = certfile
ret.cert_reqs = cert_reqs
ret.ssl_version = ssl_version
ret.ca_certs = ca_certs
ret.__class__ = GreenSSLSocket
return ret
@staticmethod
def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs,
ssl_version, ca_certs, do_handshake_on_connect, ciphers):
context = _original_sslcontext(protocol=ssl_version)
        # CERT_NONE/CERT_OPTIONAL/CERT_REQUIRED belong to verify_mode; they
        # are not OP_* option flags.
        context.verify_mode = cert_reqs
if certfile or keyfile:
context.load_cert_chain(
certfile=certfile,
keyfile=keyfile,
)
if ca_certs:
context.load_verify_locations(ca_certs)
if ciphers:
context.set_ciphers(ciphers)
return context.wrap_socket(
sock=sock,
server_side=server_side,
do_handshake_on_connect=do_handshake_on_connect,
)
# we are inheriting from SSLSocket because its constructor calls
# do_handshake whose behavior we wish to override
def __init__(self, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_TLS, ca_certs=None,
do_handshake_on_connect=True, *args, **kw):
if not isinstance(sock, GreenSocket):
sock = GreenSocket(sock)
self.act_non_blocking = sock.act_non_blocking
# the superclass initializer trashes the methods so we remove
# the local-object versions of them and let the actual class
# methods shine through
        # Note: This is for Python 2
try:
for fn in orig_socket._delegate_methods:
delattr(self, fn)
except AttributeError:
pass
# Python 3 SSLSocket construction process overwrites the timeout so restore it
self._timeout = sock.gettimeout()
# it also sets timeout to None internally apparently (tested with 3.4.2)
_original_sslsocket.settimeout(self, 0.0)
assert _original_sslsocket.gettimeout(self) == 0.0
# see note above about handshaking
self.do_handshake_on_connect = do_handshake_on_connect
if do_handshake_on_connect and self._connected:
self.do_handshake()
def settimeout(self, timeout):
self._timeout = timeout
def gettimeout(self):
return self._timeout
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
self._timeout = None
else:
self.act_non_blocking = True
self._timeout = 0.0
def _call_trampolining(self, func, *a, **kw):
if self.act_non_blocking:
return func(*a, **kw)
else:
while True:
try:
return func(*a, **kw)
except SSLError as exc:
if get_errno(exc) == SSL_ERROR_WANT_READ:
trampoline(self,
read=True,
timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
trampoline(self,
write=True,
timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
elif _is_py_3_7 and "unexpected eof" in exc.args[1]:
                    # For reasons not fully understood, on 3.7 we get
                    # "[SSL: KRB5_S_TKT_NYV] unexpected eof while reading"
                    # errors...
raise IOClosed
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._call_trampolining(
super().write, data)
def read(self, len=1024, buffer=None):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._call_trampolining(
super().read, len, buffer)
except IOClosed:
if buffer is None:
return b''
else:
return 0
def send(self, data, flags=0):
if self._sslobj:
return self._call_trampolining(
super().send, data, flags)
else:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
return socket.send(self, data, flags)
def sendto(self, data, addr, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
else:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
return socket.sendto(self, data, addr, flags)
def sendall(self, data, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
data_to_send = data
while (count < amount):
v = self.send(data_to_send)
count += v
if v == 0:
trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
else:
data_to_send = data[count:]
return amount
else:
while True:
try:
return socket.sendall(self, data, flags)
except orig_socket.error as e:
if self.act_non_blocking:
raise
erno = get_errno(e)
if erno in greenio.SOCKET_BLOCKING:
trampoline(self, write=True,
timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
elif erno in greenio.SOCKET_CLOSED:
return ''
raise
def recv(self, buflen=1024, flags=0):
return self._base_recv(buflen, flags, into=False)
def recv_into(self, buffer, nbytes=None, flags=0):
# Copied verbatim from CPython
if buffer and nbytes is None:
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
# end of CPython code
return self._base_recv(nbytes, flags, into=True, buffer_=buffer)
def _base_recv(self, nbytes, flags, into, buffer_=None):
if into:
plain_socket_function = socket.recv_into
else:
plain_socket_function = socket.recv
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to %s() on %s" %
plain_socket_function.__name__, self.__class__)
if into:
read = self.read(nbytes, buffer_)
else:
read = self.read(nbytes)
return read
else:
while True:
try:
args = [self, nbytes, flags]
if into:
args.insert(1, buffer_)
return plain_socket_function(*args)
except orig_socket.error as e:
if self.act_non_blocking:
raise
erno = get_errno(e)
if erno in greenio.SOCKET_BLOCKING:
try:
trampoline(
self, read=True,
timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
except IOClosed:
return b''
elif erno in greenio.SOCKET_CLOSED:
return b''
raise
def recvfrom(self, addr, buflen=1024, flags=0):
if not self.act_non_blocking:
trampoline(self, read=True, timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
return super().recvfrom(addr, buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if not self.act_non_blocking:
trampoline(self, read=True, timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
return super().recvfrom_into(buffer, nbytes, flags)
def unwrap(self):
return GreenSocket(self._call_trampolining(
super().unwrap))
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
return self._call_trampolining(
super().do_handshake)
def _socket_connect(self, addr):
real_connect = socket.connect
if self.act_non_blocking:
return real_connect(self, addr)
else:
clock = hubs.get_hub().clock
# *NOTE: gross, copied code from greenio because it's not factored
# well enough to reuse
if self.gettimeout() is None:
while True:
try:
return real_connect(self, addr)
except orig_socket.error as exc:
if get_errno(exc) in CONNECT_ERR:
trampoline(self, write=True)
elif get_errno(exc) in CONNECT_SUCCESS:
return
else:
raise
else:
end = clock() + self.gettimeout()
while True:
try:
real_connect(self, addr)
except orig_socket.error as exc:
if get_errno(exc) in CONNECT_ERR:
trampoline(
self, write=True,
timeout=end - clock(), timeout_exc=timeout_exc('timed out'))
elif get_errno(exc) in CONNECT_SUCCESS:
return
                        else:
                            raise
                    else:
                        # real_connect() completed without error
                        return
if clock() >= end:
raise timeout_exc('timed out')
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
# *NOTE: grrrrr copied this code from ssl.py because of the reference
# to socket.connect which we don't want to call directly
if self._sslobj:
raise ValueError("attempt to connect already-connected SSLSocket!")
self._socket_connect(addr)
server_side = False
try:
sslwrap = _ssl.sslwrap
except AttributeError:
# sslwrap was removed in 3.x and later in 2.7.9
context = self.context if PY33 else self._context
sslobj = context._wrap_socket(self, server_side, server_hostname=self.server_hostname)
else:
sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.ca_certs, *self.ciphers)
try:
# This is added in Python 3.5, http://bugs.python.org/issue21965
SSLObject
except NameError:
self._sslobj = sslobj
else:
self._sslobj = sslobj
if self.do_handshake_on_connect:
self.do_handshake()
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
# RDW grr duplication of code from greenio
if self.act_non_blocking:
newsock, addr = socket.accept(self)
else:
while True:
try:
newsock, addr = socket.accept(self)
break
except orig_socket.error as e:
if get_errno(e) not in greenio.SOCKET_BLOCKING:
raise
trampoline(self, read=True, timeout=self.gettimeout(),
timeout_exc=timeout_exc('timed out'))
new_ssl = type(self)(
newsock,
server_side=True,
do_handshake_on_connect=False,
suppress_ragged_eofs=self.suppress_ragged_eofs,
_context=self._context,
)
return (new_ssl, addr)
def dup(self):
raise NotImplementedError("Can't dup an ssl object")
SSLSocket = GreenSSLSocket
def wrap_socket(sock, *a, **kw):
return GreenSSLSocket(sock, *a, **kw)
class GreenSSLContext(_original_sslcontext):
__slots__ = ()
def wrap_socket(self, sock, *a, **kw):
return GreenSSLSocket(sock, *a, _context=self, **kw)
# https://github.com/eventlet/eventlet/issues/371
# Thanks to Gevent developers for sharing patch to this problem.
if hasattr(_original_sslcontext.options, 'setter'):
# In 3.6, these became properties. They want to access the
# property __set__ method in the superclass, and they do so by using
# super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
# patch, which causes infinite recursion.
# https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
@_original_sslcontext.options.setter
def options(self, value):
super(_original_sslcontext, _original_sslcontext).options.__set__(self, value)
@_original_sslcontext.verify_flags.setter
def verify_flags(self, value):
super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value)
@_original_sslcontext.verify_mode.setter
def verify_mode(self, value):
super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value)
if hasattr(_original_sslcontext, "maximum_version"):
@_original_sslcontext.maximum_version.setter
def maximum_version(self, value):
super(_original_sslcontext, _original_sslcontext).maximum_version.__set__(self, value)
if hasattr(_original_sslcontext, "minimum_version"):
@_original_sslcontext.minimum_version.setter
def minimum_version(self, value):
super(_original_sslcontext, _original_sslcontext).minimum_version.__set__(self, value)
SSLContext = GreenSSLContext
# TODO: ssl.create_default_context() was added in 2.7.9.
# Not clear we're still trying to support Python versions even older than that.
if hasattr(__ssl, 'create_default_context'):
_original_create_default_context = __ssl.create_default_context
def green_create_default_context(*a, **kw):
# We can't just monkey-patch on the green version of `wrap_socket`
# on to SSLContext instances, but SSLContext.create_default_context
# does a bunch of work. Rather than re-implementing it all, just
# switch out the __class__ to get our `wrap_socket` implementation
context = _original_create_default_context(*a, **kw)
context.__class__ = GreenSSLContext
return context
create_default_context = green_create_default_context
_create_default_https_context = green_create_default_context
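# Editor's note: a minimal usage sketch, not part of the original module;
# the hostname is a placeholder. create_default_context() returns a
# GreenSSLContext, so wrap_socket() yields a GreenSSLSocket whose handshake
# trampolines through the hub instead of blocking the OS thread.
def _green_ssl_demo(hostname='example.com'):
    raw = orig_socket.create_connection((hostname, 443))
    ctx = create_default_context()
    conn = ctx.wrap_socket(raw, server_hostname=hostname)
    try:
        return conn.version()  # e.g. 'TLSv1.3'
    finally:
        conn.close()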

View File

@ -0,0 +1,137 @@
import errno
import sys
from types import FunctionType
import eventlet
from eventlet import greenio
from eventlet import patcher
from eventlet.green import select, threading, time
__patched__ = ['call', 'check_call', 'Popen']
to_patch = [('select', select), ('threading', threading), ('time', time)]
from eventlet.green import selectors
to_patch.append(('selectors', selectors))
patcher.inject('subprocess', globals(), *to_patch)
subprocess_orig = patcher.original("subprocess")
subprocess_imported = sys.modules.get('subprocess', subprocess_orig)
mswindows = sys.platform == "win32"
if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
# Backported from Python 3.3.
# https://bitbucket.org/eventlet/eventlet/issue/89
class TimeoutExpired(Exception):
"""This exception is raised when the timeout expires while waiting for
a child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
else:
TimeoutExpired = subprocess_imported.TimeoutExpired
# This is the meat of this module, the green version of Popen.
class Popen(subprocess_orig.Popen):
"""eventlet-friendly version of subprocess.Popen"""
# We do not believe that Windows pipes support non-blocking I/O. At least,
# the Python file objects stored on our base-class object have no
# setblocking() method, and the Python fcntl module doesn't exist on
# Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
# this __init__() override is to wrap the pipes for eventlet-friendly
# non-blocking I/O, don't even bother overriding it on Windows.
if not mswindows:
def __init__(self, args, bufsize=0, *argss, **kwds):
self.args = args
# Forward the call to base-class constructor
subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
# Now wrap the pipes, if any. This logic is loosely borrowed from
# eventlet.processes.Process.run() method.
for attr in "stdin", "stdout", "stderr":
pipe = getattr(self, attr)
if pipe is not None and type(pipe) != greenio.GreenPipe:
# https://github.com/eventlet/eventlet/issues/243
# AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
mode = getattr(pipe, 'mode', '')
if not mode:
if pipe.readable():
mode += 'r'
if pipe.writable():
mode += 'w'
# ValueError: can't have unbuffered text I/O
if bufsize == 0:
bufsize = -1
wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
setattr(self, attr, wrapped_pipe)
__init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
def wait(self, timeout=None, check_interval=0.01):
# Instead of a blocking OS call, this version of wait() uses logic
# borrowed from the eventlet 0.2 processes.Process.wait() method.
if timeout is not None:
endtime = time.time() + timeout
try:
while True:
status = self.poll()
if status is not None:
return status
if timeout is not None and time.time() > endtime:
raise TimeoutExpired(self.args, timeout)
eventlet.sleep(check_interval)
except OSError as e:
if e.errno == errno.ECHILD:
# no child process, this happens if the child process
# already died and has been cleaned up
return -1
else:
raise
wait.__doc__ = subprocess_orig.Popen.wait.__doc__
if not mswindows:
# don't want to rewrite the original _communicate() method, we
# just want a version that uses eventlet.green.select.select()
# instead of select.select().
_communicate = FunctionType(
subprocess_orig.Popen._communicate.__code__,
globals())
try:
_communicate_with_select = FunctionType(
subprocess_orig.Popen._communicate_with_select.__code__,
globals())
_communicate_with_poll = FunctionType(
subprocess_orig.Popen._communicate_with_poll.__code__,
globals())
except AttributeError:
pass
# Borrow subprocess.call() and check_call(), but patch them so they reference
# OUR Popen class rather than subprocess.Popen.
def patched_function(function):
new_function = FunctionType(function.__code__, globals())
new_function.__kwdefaults__ = function.__kwdefaults__
new_function.__defaults__ = function.__defaults__
return new_function
call = patched_function(subprocess_orig.call)
check_call = patched_function(subprocess_orig.check_call)
# check_output is Python 2.7+
if hasattr(subprocess_orig, 'check_output'):
__patched__.append('check_output')
check_output = patched_function(subprocess_orig.check_output)
del patched_function
# Keep exceptions identity.
# https://github.com/eventlet/eventlet/issues/413
CalledProcessError = subprocess_imported.CalledProcessError
del subprocess_imported
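# Editor's note: a minimal usage sketch, not part of the original module.
# The green Popen never blocks the OS thread: reading the pipe and wait()
# both yield to the hub, so other greenthreads keep running meanwhile.
def _green_popen_demo():
    p = Popen([sys.executable, '-c', 'print("hello")'], stdout=PIPE)
    out = p.stdout.read()  # a GreenPipe read
    p.wait()
    return out  # b'hello\n'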

View File

@ -0,0 +1,176 @@
"""Implements the standard thread module, using greenthreads."""
import _thread as __thread
from eventlet.support import greenlets as greenlet
from eventlet import greenthread
from eventlet.timeout import with_timeout
from eventlet.lock import Lock
import sys
__patched__ = ['Lock', 'LockType', '_ThreadHandle', '_count',
'_get_main_thread_ident', '_local', '_make_thread_handle',
'allocate', 'allocate_lock', 'exit', 'get_ident',
'interrupt_main', 'stack_size', 'start_joinable_thread',
'start_new', 'start_new_thread']
error = __thread.error
LockType = Lock
__threadcount = 0
if hasattr(__thread, "_is_main_interpreter"):
_is_main_interpreter = __thread._is_main_interpreter
def _set_sentinel():
    # TODO: this is dummy code; reimplementing it may be needed:
# https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
return allocate_lock()
TIMEOUT_MAX = __thread.TIMEOUT_MAX
def _count():
return __threadcount
def get_ident(gr=None):
if gr is None:
return id(greenlet.getcurrent())
else:
return id(gr)
def __thread_body(func, args, kwargs):
global __threadcount
__threadcount += 1
try:
func(*args, **kwargs)
finally:
__threadcount -= 1
class _ThreadHandle:
def __init__(self, greenthread=None):
self._greenthread = greenthread
self._done = False
def _set_done(self):
self._done = True
def is_done(self):
return self._done
@property
def ident(self):
return get_ident(self._greenthread)
def join(self, timeout=None):
if not hasattr(self._greenthread, "wait"):
return
if timeout is not None:
return with_timeout(timeout, self._greenthread.wait)
return self._greenthread.wait()
def _make_thread_handle(ident):
greenthread = greenlet.getcurrent()
assert ident == get_ident(greenthread)
return _ThreadHandle(greenthread=greenthread)
def __spawn_green(function, args=(), kwargs=None, joinable=False):
if ((3, 4) <= sys.version_info < (3, 13)
and getattr(function, '__module__', '') == 'threading'
and hasattr(function, '__self__')):
# In Python 3.4-3.12, threading.Thread uses an internal lock
# automatically released when the python thread state is deleted.
# With monkey patching, eventlet uses green threads without python
# thread state, so the lock is not automatically released.
#
# Wrap _bootstrap_inner() to release explicitly the thread state lock
# when the thread completes.
thread = function.__self__
bootstrap_inner = thread._bootstrap_inner
def wrap_bootstrap_inner():
try:
bootstrap_inner()
finally:
# The lock can be cleared (ex: by a fork())
if getattr(thread, "_tstate_lock", None) is not None:
thread._tstate_lock.release()
thread._bootstrap_inner = wrap_bootstrap_inner
kwargs = kwargs or {}
spawn_func = greenthread.spawn if joinable else greenthread.spawn_n
return spawn_func(__thread_body, function, args, kwargs)
def start_joinable_thread(function, handle=None, daemon=True):
g = __spawn_green(function, joinable=True)
if handle is None:
handle = _ThreadHandle(greenthread=g)
else:
handle._greenthread = g
return handle
def start_new_thread(function, args=(), kwargs=None):
g = __spawn_green(function, args=args, kwargs=kwargs)
return get_ident(g)
start_new = start_new_thread
def _get_main_thread_ident():
greenthread = greenlet.getcurrent()
while greenthread.parent is not None:
greenthread = greenthread.parent
return get_ident(greenthread)
def allocate_lock(*a):
return LockType(1)
allocate = allocate_lock
def exit():
raise greenlet.GreenletExit
exit_thread = __thread.exit_thread
def interrupt_main():
curr = greenlet.getcurrent()
if curr.parent and not curr.parent.dead:
curr.parent.throw(KeyboardInterrupt())
else:
raise KeyboardInterrupt()
if hasattr(__thread, 'stack_size'):
__original_stack_size__ = __thread.stack_size
def stack_size(size=None):
if size is None:
return __original_stack_size__()
if size > __original_stack_size__():
return __original_stack_size__(size)
else:
pass
# not going to decrease stack_size, because otherwise other greenlets in
# this thread will suffer
from eventlet.corolocal import local as _local
if hasattr(__thread, 'daemon_threads_allowed'):
daemon_threads_allowed = __thread.daemon_threads_allowed
if hasattr(__thread, '_shutdown'):
_shutdown = __thread._shutdown
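# Editor's note: a minimal usage sketch, not part of the original module.
# start_new_thread() spawns a greenthread and get_ident() returns the id()
# of that greenlet, so the ident returned here matches the one seen inside.
def _start_new_thread_demo():
    seen = []
    ident = start_new_thread(lambda: seen.append(get_ident()), ())
    greenthread.sleep(0)  # give the green "thread" a chance to run
    return ident, seen  # ident == seen[0]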

View File

@ -0,0 +1,132 @@
"""Implements the standard threading module, using greenthreads."""
import eventlet
from eventlet.green import thread
from eventlet.green import time
from eventlet.support import greenlets as greenlet
__patched__ = ['Lock', '_after_fork', '_allocate_lock', '_get_main_thread_ident',
'_make_thread_handle', '_shutdown', '_sleep',
'_start_joinable_thread', '_start_new_thread', '_ThreadHandle',
'currentThread', 'current_thread', 'local', 'stack_size']
__patched__ += ['get_ident', '_set_sentinel']
__orig_threading = eventlet.patcher.original('threading')
__threadlocal = __orig_threading.local()
__patched_enumerate = None
eventlet.patcher.inject(
'threading',
globals(),
('_thread', thread),
('time', time))
_count = 1
class _GreenThread:
"""Wrapper for GreenThread objects to provide Thread-like attributes
and methods"""
def __init__(self, g):
global _count
self._g = g
self._name = 'GreenThread-%d' % _count
_count += 1
def __repr__(self):
return '<_GreenThread(%s, %r)>' % (self._name, self._g)
def join(self, timeout=None):
return self._g.wait()
def getName(self):
return self._name
get_name = getName
def setName(self, name):
self._name = str(name)
set_name = setName
name = property(getName, setName)
ident = property(lambda self: id(self._g))
def isAlive(self):
return True
is_alive = isAlive
daemon = property(lambda self: True)
def isDaemon(self):
return self.daemon
is_daemon = isDaemon
__threading = None
def _fixup_thread(t):
# Some third-party packages (lockfile) will try to patch the
# threading.Thread class with a get_name attribute if it doesn't
# exist. Since we might return Thread objects from the original
# threading package that won't get patched, let's make sure each
    # individual object gets patched too, once our patched threading.Thread
    # class has been patched. This is why monkey patching can be bad...
global __threading
if not __threading:
__threading = __import__('threading')
if (hasattr(__threading.Thread, 'get_name') and
not hasattr(t, 'get_name')):
t.get_name = t.getName
return t
def current_thread():
global __patched_enumerate
g = greenlet.getcurrent()
if not g:
# Not currently in a greenthread, fall back to standard function
return _fixup_thread(__orig_threading.current_thread())
try:
active = __threadlocal.active
except AttributeError:
active = __threadlocal.active = {}
g_id = id(g)
t = active.get(g_id)
if t is not None:
return t
# FIXME: move import from function body to top
# (jaketesler@github) Furthermore, I was unable to have the current_thread() return correct results from
# threading.enumerate() unless the enumerate() function was a) imported at runtime using the gross __import__() call
# and b) was hot-patched using patch_function().
# https://github.com/eventlet/eventlet/issues/172#issuecomment-379421165
if __patched_enumerate is None:
__patched_enumerate = eventlet.patcher.patch_function(__import__('threading').enumerate)
found = [th for th in __patched_enumerate() if th.ident == g_id]
if found:
return found[0]
# Add green thread to active if we can clean it up on exit
def cleanup(g):
del active[g_id]
try:
g.link(cleanup)
except AttributeError:
# Not a GreenThread type, so there's no way to hook into
# the green thread exiting. Fall back to the standard
# function then.
t = _fixup_thread(__orig_threading.current_thread())
else:
t = active[g_id] = _GreenThread(g)
return t
currentThread = current_thread
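# Editor's note: a minimal usage sketch, not part of the original module.
# Inside a greenthread, current_thread() returns a _GreenThread wrapper with
# a synthetic name; in the main greenlet it falls back to the original
# threading.current_thread().
def _current_thread_demo():
    wrapper = eventlet.spawn(current_thread).wait()
    return wrapper.name  # e.g. 'GreenThread-1'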

View File

@ -0,0 +1,6 @@
__time = __import__('time')
from eventlet.patcher import slurp_properties
__patched__ = ['sleep']
slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time))
from eventlet.greenthread import sleep
sleep # silence pyflakes

View File

@ -0,0 +1,5 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import time
from eventlet.green import httplib
from eventlet.green import ftplib

View File

@ -0,0 +1,4 @@
from eventlet import patcher
from eventlet.green.urllib import response
patcher.inject('urllib.error', globals(), ('urllib.response', response))
del patcher

View File

@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.parse', globals())
del patcher

View File

@ -0,0 +1,50 @@
from eventlet import patcher
from eventlet.green import ftplib, http, os, socket, time
from eventlet.green.http import client as http_client
from eventlet.green.urllib import error, parse, response
# TODO: should we also have a green email version?
# import email
to_patch = [
# This (http module) is needed here, otherwise test__greenness hangs
# forever on Python 3 because parts of non-green http (including
# http.client) leak into our patched urllib.request. There may be a nicer
# way to handle this (I didn't dig too deep) but this does the job. Jakub
('http', http),
('http.client', http_client),
('os', os),
('socket', socket),
('time', time),
('urllib.error', error),
('urllib.parse', parse),
('urllib.response', response),
]
try:
from eventlet.green import ssl
except ImportError:
pass
else:
to_patch.append(('ssl', ssl))
patcher.inject('urllib.request', globals(), *to_patch)
del to_patch
to_patch_in_functions = [('ftplib', ftplib)]
del ftplib
FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)
ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)
ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)
del error
del parse
del response
del to_patch_in_functions
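# Editor's note: a minimal usage sketch, not part of the original module;
# the URL is a placeholder. After the injection above, urlopen() runs on
# green sockets, so many fetches can proceed concurrently in one OS thread.
def _green_urlopen_demo(url='http://example.com/'):
    resp = urlopen(url, timeout=10)
    try:
        return resp.status
    finally:
        resp.close()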

View File

@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.response', globals())
del patcher

View File

@ -0,0 +1,20 @@
from eventlet import patcher
from eventlet.green import ftplib
from eventlet.green import httplib
from eventlet.green import socket
from eventlet.green import ssl
from eventlet.green import time
from eventlet.green import urllib
patcher.inject(
'urllib2',
globals(),
('httplib', httplib),
('socket', socket),
('ssl', ssl),
('time', time),
('urllib', urllib))
FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib))
del patcher

View File

@ -0,0 +1,465 @@
"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
found in :mod:`pyzmq <zmq>` to be non blocking.
"""
__zmq__ = __import__('zmq')
import eventlet.hubs
from eventlet.patcher import slurp_properties
from eventlet.support import greenlets as greenlet
__patched__ = ['Context', 'Socket']
slurp_properties(__zmq__, globals(), ignore=__patched__)
from collections import deque
try:
# alias XREQ/XREP to DEALER/ROUTER if available
if not hasattr(__zmq__, 'XREQ'):
XREQ = DEALER
if not hasattr(__zmq__, 'XREP'):
XREP = ROUTER
except NameError:
pass
class LockReleaseError(Exception):
pass
class _QueueLock:
"""A Lock that can be acquired by at most one thread. Any other
thread calling acquire will be blocked in a queue. When release
is called, the threads are awoken in the order they blocked,
    one at a time. This lock can be acquired recursively by the same
thread."""
def __init__(self):
self._waiters = deque()
self._count = 0
self._holder = None
self._hub = eventlet.hubs.get_hub()
def __nonzero__(self):
return bool(self._count)
__bool__ = __nonzero__
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
def acquire(self):
current = greenlet.getcurrent()
if (self._waiters or self._count > 0) and self._holder is not current:
# block until lock is free
self._waiters.append(current)
self._hub.switch()
w = self._waiters.popleft()
assert w is current, 'Waiting threads woken out of order'
assert self._count == 0, 'After waking a thread, the lock must be unacquired'
self._holder = current
self._count += 1
def release(self):
if self._count <= 0:
raise LockReleaseError("Cannot release unacquired lock")
self._count -= 1
if self._count == 0:
self._holder = None
if self._waiters:
# wake next
self._hub.schedule_call_global(0, self._waiters[0].switch)
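# Editor's note: a minimal usage sketch, not part of the original module.
# The holder may re-enter the lock recursively; other greenthreads queue in
# FIFO order until release() drops the count back to zero.
def _queue_lock_demo():
    lock = _QueueLock()
    with lock:
        with lock:  # recursive acquisition by the same greenthread
            assert bool(lock)
    assert not bool(lock)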
class _BlockedThread:
"""Is either empty, or represents a single blocked thread that
blocked itself by calling the block() method. The thread can be
awoken by calling wake(). Wake() can be called multiple times and
all but the first call will have no effect."""
def __init__(self):
self._blocked_thread = None
self._wakeupper = None
self._hub = eventlet.hubs.get_hub()
def __nonzero__(self):
return self._blocked_thread is not None
__bool__ = __nonzero__
def block(self, deadline=None):
if self._blocked_thread is not None:
raise Exception("Cannot block more than one thread on one BlockedThread")
self._blocked_thread = greenlet.getcurrent()
if deadline is not None:
self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake)
try:
self._hub.switch()
finally:
self._blocked_thread = None
# cleanup the wakeup task
if self._wakeupper is not None:
# Important to cancel the wakeup task so it doesn't
# spuriously wake this greenthread later on.
self._wakeupper.cancel()
self._wakeupper = None
def wake(self):
"""Schedules the blocked thread to be awoken and return
True. If wake has already been called or if there is no
blocked thread, then this call has no effect and returns
False."""
if self._blocked_thread is not None and self._wakeupper is None:
self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch)
return True
return False
class Context(__zmq__.Context):
"""Subclass of :class:`zmq.Context`
"""
def socket(self, socket_type):
"""Overridden method to ensure that the green version of socket is used
Behaves the same as :meth:`zmq.Context.socket`, but ensures
that a :class:`Socket` with all of its send and recv methods set to be
non-blocking is returned
"""
if self.closed:
raise ZMQError(ENOTSUP)
return Socket(self, socket_type)
def _wraps(source_fn):
"""A decorator that copies the __name__ and __doc__ from the given
function
"""
def wrapper(dest_fn):
dest_fn.__name__ = source_fn.__name__
dest_fn.__doc__ = source_fn.__doc__
return dest_fn
return wrapper
# Implementation notes: Each socket in 0mq contains a pipe that the
# background IO threads use to communicate with the socket. These
# events are important because they tell the socket when it is able to
# send and when it has messages waiting to be received. The read end
# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
#
# Events are read from the socket's event pipe only on the thread that
# the 0mq context is associated with, which is the native thread the
# greenthreads are running on, and the only operations that cause the
# events to be read and processed are send(), recv() and
# getsockopt(zmq.EVENTS). This means that after doing any of these
# three operations, the ability of the socket to send or receive a
# message without blocking may have changed, but after the events are
# read the FD is no longer readable so the hub may not signal our
# listener.
#
# If we understand that after calling send() a message might be ready
# to be received and that after calling recv() a message might be able
# to be sent, what should we do next? There are two approaches:
#
# 1. Always wake the other thread if there is one waiting. This
# wakeup may be spurious because the socket might not actually be
# ready for a send() or recv(). However, if a thread is in a
# tight-loop successfully calling send() or recv() then the wakeups
# are naturally batched and there's very little cost added to each
# send/recv call.
#
# or
#
# 2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
# thread should be woken up. This avoids spurious wake-ups but may
# add overhead because getsockopt will cause all events to be
# processed, whereas send and recv throttle processing
# events. Admittedly, all of the events will need to be processed
# eventually, but it is likely faster to batch the processing.
#
# Which approach is better? I have no idea.
#
# TODO:
# - Support MessageTrackers and make MessageTracker.wait green
_Socket = __zmq__.Socket
_Socket_recv = _Socket.recv
_Socket_send = _Socket.send
_Socket_send_multipart = _Socket.send_multipart
_Socket_recv_multipart = _Socket.recv_multipart
_Socket_send_string = _Socket.send_string
_Socket_recv_string = _Socket.recv_string
_Socket_send_pyobj = _Socket.send_pyobj
_Socket_recv_pyobj = _Socket.recv_pyobj
_Socket_send_json = _Socket.send_json
_Socket_recv_json = _Socket.recv_json
_Socket_getsockopt = _Socket.getsockopt
class Socket(_Socket):
"""Green version of :class:``zmq.core.socket.Socket``.
The following three methods are always overridden:
* send
* recv
* getsockopt
To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
is deferred to the hub (using :func:``eventlet.hubs.trampoline``) if a
``zmq.EAGAIN`` (retry) error is raised.
For some socket types, the following methods are also overridden:
* send_multipart
* recv_multipart
"""
def __init__(self, context, socket_type):
super().__init__(context, socket_type)
self.__dict__['_eventlet_send_event'] = _BlockedThread()
self.__dict__['_eventlet_recv_event'] = _BlockedThread()
self.__dict__['_eventlet_send_lock'] = _QueueLock()
self.__dict__['_eventlet_recv_lock'] = _QueueLock()
def event(fd):
# Some events arrived at the zmq socket. This may mean
# there's a message that can be read or there's space for
# a message to be written.
send_wake = self._eventlet_send_event.wake()
recv_wake = self._eventlet_recv_event.wake()
if not send_wake and not recv_wake:
# if no waiting send or recv thread was woken up, then
# force the zmq socket's events to be processed to
# avoid repeated wakeups
_Socket_getsockopt(self, EVENTS)
hub = eventlet.hubs.get_hub()
self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
self.getsockopt(FD),
event,
lambda _: None,
lambda: None)
self.__dict__['_eventlet_clock'] = hub.clock
@_wraps(_Socket.close)
def close(self, linger=None):
super().close(linger)
if self._eventlet_listener is not None:
eventlet.hubs.get_hub().remove(self._eventlet_listener)
self.__dict__['_eventlet_listener'] = None
# wake any blocked threads
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
@_wraps(_Socket.getsockopt)
def getsockopt(self, option):
result = _Socket_getsockopt(self, option)
if option == EVENTS:
# Getting the events causes the zmq socket to process
# events which may mean a msg can be sent or received. If
# there is a greenthread blocked and waiting for events,
# it will miss the edge-triggered read event, so wake it
# up.
if (result & POLLOUT):
self._eventlet_send_event.wake()
if (result & POLLIN):
self._eventlet_recv_event.wake()
return result
@_wraps(_Socket.send)
def send(self, msg, flags=0, copy=True, track=False):
"""A send method that's safe to use when multiple greenthreads
are calling send, send_multipart, recv and recv_multipart on
the same socket.
"""
if flags & NOBLOCK:
result = _Socket_send(self, msg, flags, copy, track)
# Instead of calling both wake methods, could call
# self.getsockopt(EVENTS) which would trigger wakeups if
# needed.
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
return result
# TODO: pyzmq will copy the message buffer and create Message
# objects under some circumstances. We could do that work here
# once to avoid doing it every time the send is retried.
flags |= NOBLOCK
with self._eventlet_send_lock:
while True:
try:
return _Socket_send(self, msg, flags, copy, track)
except ZMQError as e:
if e.errno == EAGAIN:
self._eventlet_send_event.block()
else:
raise
finally:
# The call to send processes 0mq events and may
# make the socket ready to recv. Wake the next
# receiver. (Could check EVENTS for POLLIN here)
self._eventlet_recv_event.wake()
@_wraps(_Socket.send_multipart)
def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
"""A send_multipart method that's safe to use when multiple
greenthreads are calling send, send_multipart, recv and
recv_multipart on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_multipart(self, msg_parts, flags, copy, track)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_multipart(self, msg_parts, flags, copy, track)
@_wraps(_Socket.send_string)
def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
"""A send_string method that's safe to use when multiple
greenthreads are calling send, send_string, recv and
recv_string on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_string(self, u, flags, copy, encoding)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_string(self, u, flags, copy, encoding)
@_wraps(_Socket.send_pyobj)
def send_pyobj(self, obj, flags=0, protocol=2):
"""A send_pyobj method that's safe to use when multiple
greenthreads are calling send, send_pyobj, recv and
recv_pyobj on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_pyobj(self, obj, flags, protocol)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_pyobj(self, obj, flags, protocol)
@_wraps(_Socket.send_json)
def send_json(self, obj, flags=0, **kwargs):
"""A send_json method that's safe to use when multiple
greenthreads are calling send, send_json, recv and
recv_json on the same socket.
"""
if flags & NOBLOCK:
return _Socket_send_json(self, obj, flags, **kwargs)
# acquire lock here so the subsequent calls to send for the
# message parts after the first don't block
with self._eventlet_send_lock:
return _Socket_send_json(self, obj, flags, **kwargs)
@_wraps(_Socket.recv)
def recv(self, flags=0, copy=True, track=False):
"""A recv method that's safe to use when multiple greenthreads
are calling send, send_multipart, recv and recv_multipart on
the same socket.
"""
if flags & NOBLOCK:
msg = _Socket_recv(self, flags, copy, track)
# Instead of calling both wake methods, could call
# self.getsockopt(EVENTS) which would trigger wakeups if
# needed.
self._eventlet_send_event.wake()
self._eventlet_recv_event.wake()
return msg
deadline = None
if hasattr(__zmq__, 'RCVTIMEO'):
sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
if sock_timeout == -1:
pass
elif sock_timeout > 0:
deadline = self._eventlet_clock() + sock_timeout / 1000.0
else:
raise ValueError(sock_timeout)
flags |= NOBLOCK
with self._eventlet_recv_lock:
while True:
try:
return _Socket_recv(self, flags, copy, track)
except ZMQError as e:
if e.errno == EAGAIN:
# zmq in its wisdom decided to reuse EAGAIN for timeouts
if deadline is not None and self._eventlet_clock() > deadline:
e.is_timeout = True
raise
self._eventlet_recv_event.block(deadline=deadline)
else:
raise
finally:
# The call to recv processes 0mq events and may
# make the socket ready to send. Wake the next
# receiver. (Could check EVENTS for POLLOUT here)
self._eventlet_send_event.wake()
@_wraps(_Socket.recv_multipart)
def recv_multipart(self, flags=0, copy=True, track=False):
"""A recv_multipart method that's safe to use when multiple
greenthreads are calling send, send_multipart, recv and
recv_multipart on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_multipart(self, flags, copy, track)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_multipart(self, flags, copy, track)
@_wraps(_Socket.recv_string)
def recv_string(self, flags=0, encoding='utf-8'):
"""A recv_string method that's safe to use when multiple
greenthreads are calling send, send_string, recv and
recv_string on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_string(self, flags, encoding)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_string(self, flags, encoding)
@_wraps(_Socket.recv_json)
def recv_json(self, flags=0, **kwargs):
"""A recv_json method that's safe to use when multiple
greenthreads are calling send, send_json, recv and
recv_json on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_json(self, flags, **kwargs)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_json(self, flags, **kwargs)
@_wraps(_Socket.recv_pyobj)
def recv_pyobj(self, flags=0):
"""A recv_pyobj method that's safe to use when multiple
greenthreads are calling send, send_pyobj, recv and
recv_pyobj on the same socket.
"""
if flags & NOBLOCK:
return _Socket_recv_pyobj(self, flags)
# acquire lock here so the subsequent calls to recv for the
# message parts after the first don't block
with self._eventlet_recv_lock:
return _Socket_recv_pyobj(self, flags)
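# Editor's note: a minimal usage sketch, not part of the original module;
# the inproc endpoint name is a placeholder. Both sockets share one hub, and
# the green send()/recv() retry with NOBLOCK instead of blocking the thread.
def _green_zmq_demo():
    ctx = Context()
    rep = ctx.socket(REP)
    req = ctx.socket(REQ)
    rep.bind('inproc://green-demo')
    req.connect('inproc://green-demo')
    req.send(b'ping')
    assert rep.recv() == b'ping'
    rep.send(b'pong')
    return req.recv()  # b'pong'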

View File

@ -0,0 +1,3 @@
from eventlet.greenio.base import * # noqa
from eventlet.greenio.py3 import * # noqa

View File

@ -0,0 +1,492 @@
import errno
import os
import socket
import sys
import time
import warnings
import eventlet
from eventlet.hubs import trampoline, notify_opened, IOClosed
from eventlet.support import get_errno
__all__ = [
'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
'shutdown_safe', 'SSL',
'socket_timeout',
]
BUFFER_SIZE = 4096
CONNECT_ERR = {errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK}
CONNECT_SUCCESS = {0, errno.EISCONN}
if sys.platform[:3] == "win":
CONNECT_ERR.add(errno.WSAEINVAL) # Bug 67
_original_socket = eventlet.patcher.original('socket').socket
if sys.version_info >= (3, 10):
socket_timeout = socket.timeout # Really, TimeoutError
else:
socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)
def socket_connect(descriptor, address):
"""
Attempts to connect to the address, returns the descriptor if it succeeds,
returns None if it needs to trampoline, and raises any exceptions.
"""
err = descriptor.connect_ex(address)
if err in CONNECT_ERR:
return None
if err not in CONNECT_SUCCESS:
raise OSError(err, errno.errorcode[err])
return descriptor
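# Editor's note: a minimal illustration, not part of the original module;
# the address is a placeholder. On a nonblocking socket the first
# connect_ex() typically returns EINPROGRESS, so socket_connect() returns
# None, meaning "trampoline on writability, then call socket_checkerr()".
def _socket_connect_demo(address=('127.0.0.1', 80)):
    sock = _original_socket()
    sock.setblocking(False)
    return socket_connect(sock, address)  # None while the connect is pending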
def socket_checkerr(descriptor):
err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err not in CONNECT_SUCCESS:
raise OSError(err, errno.errorcode[err])
def socket_accept(descriptor):
"""
Attempts to accept() on the descriptor, returns a client,address tuple
if it succeeds; returns None if it needs to trampoline, and raises
any exceptions.
"""
try:
return descriptor.accept()
except OSError as e:
if get_errno(e) == errno.EWOULDBLOCK:
return None
raise
if sys.platform[:3] == "win":
# winsock sometimes throws ENOTCONN
SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK}
SOCKET_CLOSED = {errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN}
else:
# oddly, on linux/darwin, an unconnected socket is expected to block,
# so we treat ENOTCONN the same as EWOULDBLOCK
SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN}
SOCKET_CLOSED = {errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE}
def set_nonblocking(fd):
"""
Sets the descriptor to be nonblocking. Works on many file-like
objects as well as sockets. Only sockets can be nonblocking on
Windows, however.
"""
try:
setblocking = fd.setblocking
except AttributeError:
# fd has no setblocking() method. It could be that this version of
# Python predates socket.setblocking(). In that case, we can still set
# the flag "by hand" on the underlying OS fileno using the fcntl
# module.
try:
import fcntl
except ImportError:
# Whoops, Windows has no fcntl module. This might not be a socket
# at all, but rather a file-like object with no setblocking()
# method. In particular, on Windows, pipes don't support
# non-blocking I/O and therefore don't have that method. Which
# means fcntl wouldn't help even if we could load it.
raise NotImplementedError("set_nonblocking() on a file object "
"with no setblocking() method "
"(Windows pipes don't support non-blocking I/O)")
# We managed to import fcntl.
fileno = fd.fileno()
orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
new_flags = orig_flags | os.O_NONBLOCK
if new_flags != orig_flags:
fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)
else:
# socket supports setblocking()
setblocking(0)
try:
from socket import _GLOBAL_DEFAULT_TIMEOUT
except ImportError:
_GLOBAL_DEFAULT_TIMEOUT = object()
class GreenSocket:
"""
    Green version of the socket.socket class that is intended to be 100%
    API-compatible.

    It also recognizes the keyword parameter 'set_nonblocking=True'.
    Pass False to indicate that the socket is already in non-blocking mode,
    to save syscalls.
"""
# This placeholder is to prevent __getattr__ from creating an infinite call loop
fd = None
def __init__(self, family=socket.AF_INET, *args, **kwargs):
should_set_nonblocking = kwargs.pop('set_nonblocking', True)
if isinstance(family, int):
fd = _original_socket(family, *args, **kwargs)
# Notify the hub that this is a newly-opened socket.
notify_opened(fd.fileno())
else:
fd = family
# import timeout from other socket, if it was there
try:
self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
except AttributeError:
self._timeout = socket.getdefaulttimeout()
        # Check fd.fileno() != -1 so that we won't call set_nonblocking()
        # on a closed socket
if should_set_nonblocking and fd.fileno() != -1:
set_nonblocking(fd)
self.fd = fd
# when client calls setblocking(0) or settimeout(0) the socket must
# act non-blocking
self.act_non_blocking = False
# Copy some attributes from underlying real socket.
        # This is the easiest way that I found to fix
# https://bitbucket.org/eventlet/eventlet/issue/136
# Only `getsockopt` is required to fix that issue, others
# are just premature optimization to save __getattr__ call.
self.bind = fd.bind
self.close = fd.close
self.fileno = fd.fileno
self.getsockname = fd.getsockname
self.getsockopt = fd.getsockopt
self.listen = fd.listen
self.setsockopt = fd.setsockopt
self.shutdown = fd.shutdown
self._closed = False
@property
def _sock(self):
return self
def _get_io_refs(self):
return self.fd._io_refs
def _set_io_refs(self, value):
self.fd._io_refs = value
_io_refs = property(_get_io_refs, _set_io_refs)
    # Forward unknown attributes to fd, caching the value for future use.
    # I do not see any simple attribute which could be changed,
    # so caching everything in self is fine.
    # If we find such attributes, only attributes having __get__ might be cached.
    # For now I do not want to complicate it.
def __getattr__(self, name):
if self.fd is None:
raise AttributeError(name)
attr = getattr(self.fd, name)
setattr(self, name, attr)
return attr
def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
""" We need to trampoline via the event hub.
We catch any signal back from the hub indicating that the operation we
were waiting on was associated with a filehandle that's since been
invalidated.
"""
if self._closed:
# If we did any logging, alerting to a second trampoline attempt on a closed
# socket here would be useful.
raise IOClosed()
try:
return trampoline(fd, read=read, write=write, timeout=timeout,
timeout_exc=timeout_exc,
mark_as_closed=self._mark_as_closed)
except IOClosed:
# This socket's been obsoleted. De-fang it.
self._mark_as_closed()
raise
def accept(self):
if self.act_non_blocking:
res = self.fd.accept()
notify_opened(res[0].fileno())
return res
fd = self.fd
_timeout_exc = socket_timeout('timed out')
while True:
res = socket_accept(fd)
if res is not None:
client, addr = res
notify_opened(client.fileno())
set_nonblocking(client)
return type(self)(client), addr
self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
def _mark_as_closed(self):
""" Mark this socket as being closed """
self._closed = True
def __del__(self):
# This is in case self.close is not assigned yet (currently the constructor does it)
close = getattr(self, 'close', None)
if close is not None:
close()
def connect(self, address):
if self.act_non_blocking:
return self.fd.connect(address)
fd = self.fd
_timeout_exc = socket_timeout('timed out')
if self.gettimeout() is None:
while not socket_connect(fd, address):
try:
self._trampoline(fd, write=True)
except IOClosed:
raise OSError(errno.EBADFD)
socket_checkerr(fd)
else:
end = time.time() + self.gettimeout()
while True:
if socket_connect(fd, address):
return
if time.time() >= end:
raise _timeout_exc
timeout = end - time.time()
try:
self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc)
except IOClosed:
# ... we need some workable errno here.
raise OSError(errno.EBADFD)
socket_checkerr(fd)
def connect_ex(self, address):
if self.act_non_blocking:
return self.fd.connect_ex(address)
fd = self.fd
if self.gettimeout() is None:
while not socket_connect(fd, address):
try:
self._trampoline(fd, write=True)
socket_checkerr(fd)
except OSError as ex:
return get_errno(ex)
except IOClosed:
return errno.EBADFD
return 0
else:
end = time.time() + self.gettimeout()
timeout_exc = socket.timeout(errno.EAGAIN)
while True:
try:
if socket_connect(fd, address):
return 0
if time.time() >= end:
raise timeout_exc
self._trampoline(fd, write=True, timeout=end - time.time(),
timeout_exc=timeout_exc)
socket_checkerr(fd)
except OSError as ex:
return get_errno(ex)
except IOClosed:
return errno.EBADFD
return 0
def dup(self, *args, **kw):
sock = self.fd.dup(*args, **kw)
newsock = type(self)(sock, set_nonblocking=False)
newsock.settimeout(self.gettimeout())
return newsock
def makefile(self, *args, **kwargs):
return _original_socket.makefile(self, *args, **kwargs)
def makeGreenFile(self, *args, **kw):
warnings.warn("makeGreenFile has been deprecated, please use "
"makefile instead", DeprecationWarning, stacklevel=2)
return self.makefile(*args, **kw)
def _read_trampoline(self):
self._trampoline(
self.fd,
read=True,
timeout=self.gettimeout(),
timeout_exc=socket_timeout('timed out'))
def _recv_loop(self, recv_meth, empty_val, *args):
if self.act_non_blocking:
return recv_meth(*args)
while True:
try:
# recv: bufsize=0?
# recv_into: buffer is empty?
# This is needed because behind the scenes we use sockets in
# nonblocking mode and builtin recv* methods. Attempting to read
# 0 bytes from a nonblocking socket using a builtin recv* method
# does not raise a timeout exception. Since we're simulating
# a blocking socket here we need to produce a timeout exception
# if needed, hence the call to trampoline.
if not args[0]:
self._read_trampoline()
return recv_meth(*args)
except OSError as e:
if get_errno(e) in SOCKET_BLOCKING:
pass
elif get_errno(e) in SOCKET_CLOSED:
return empty_val
else:
raise
try:
self._read_trampoline()
except IOClosed as e:
# Perhaps we should return '' instead?
raise EOFError()
def recv(self, bufsize, flags=0):
return self._recv_loop(self.fd.recv, b'', bufsize, flags)
def recvfrom(self, bufsize, flags=0):
return self._recv_loop(self.fd.recvfrom, b'', bufsize, flags)
def recv_into(self, buffer, nbytes=0, flags=0):
return self._recv_loop(self.fd.recv_into, 0, buffer, nbytes, flags)
def recvfrom_into(self, buffer, nbytes=0, flags=0):
return self._recv_loop(self.fd.recvfrom_into, 0, buffer, nbytes, flags)
def _send_loop(self, send_method, data, *args):
if self.act_non_blocking:
return send_method(data, *args)
_timeout_exc = socket_timeout('timed out')
while True:
try:
return send_method(data, *args)
except OSError as e:
eno = get_errno(e)
if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
raise
try:
self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
timeout_exc=_timeout_exc)
except IOClosed:
raise OSError(errno.ECONNRESET, 'Connection closed by another thread')
def send(self, data, flags=0):
return self._send_loop(self.fd.send, data, flags)
def sendto(self, data, *args):
return self._send_loop(self.fd.sendto, data, *args)
def sendall(self, data, flags=0):
tail = self.send(data, flags)
len_data = len(data)
while tail < len_data:
tail += self.send(data[tail:], flags)
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
self._timeout = None
else:
self.act_non_blocking = True
self._timeout = 0.0
def settimeout(self, howlong):
if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
self.setblocking(True)
return
try:
f = howlong.__float__
except AttributeError:
raise TypeError('a float is required')
howlong = f()
if howlong < 0.0:
raise ValueError('Timeout value out of range')
if howlong == 0.0:
self.act_non_blocking = True
self._timeout = 0.0
else:
self.act_non_blocking = False
self._timeout = howlong
def gettimeout(self):
return self._timeout
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
if "__pypy__" in sys.builtin_module_names:
def _reuse(self):
getattr(self.fd, '_sock', self.fd)._reuse()
def _drop(self):
getattr(self.fd, '_sock', self.fd)._drop()
def _operation_on_closed_file(*args, **kwargs):
raise ValueError("I/O operation on closed file")
greenpipe_doc = """
GreenPipe is a cooperative replacement for file class.
It will cooperate on pipes. It will block on regular file.
Differences from file class:
- mode is a r/w property; it should be r/o
- encoding property not implemented
- write/writelines will not raise a TypeError exception when non-string data is
written; they will write str(data) instead
- universal newlines are not supported and the newlines property is not implemented
- file argument can be descriptor, file name or file object.
"""
# import SSL module here so we can refer to greenio.SSL.exceptionclass
try:
from OpenSSL import SSL
except ImportError:
# pyOpenSSL not installed, define exceptions anyway for convenience
class SSL:
class WantWriteError(Exception):
pass
class WantReadError(Exception):
pass
class ZeroReturnError(Exception):
pass
class SysCallError(Exception):
pass
def shutdown_safe(sock):
"""Shuts down the socket. This is a convenience method for
code that wants to gracefully handle regular sockets, SSL.Connection
sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.7 interchangeably.
Both types of ssl socket require a shutdown() before close,
but they have different arity on their shutdown method.
Regular sockets don't need a shutdown before close, but it doesn't hurt.
"""
try:
try:
# socket, ssl.SSLSocket
return sock.shutdown(socket.SHUT_RDWR)
except TypeError:
# SSL.Connection
return sock.shutdown()
except OSError as e:
# we don't care if the socket is already closed;
# this will often be the case in an http server context
if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK):
raise
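# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# The green socket above trampolines through the hub on connect/recv instead
# of blocking the OS thread, so several fetches can overlap in one thread.
# The hostnames below are placeholders; network access is assumed.
if __name__ == "__main__":
    import eventlet

    def _fetch_status_line(host):
        sock = eventlet.connect((host, 80))   # cooperative GreenSocket
        sock.settimeout(5.0)                  # raises socket.timeout on expiry
        sock.sendall(b"HEAD / HTTP/1.0\r\nHost: " + host.encode() + b"\r\n\r\n")
        try:
            return sock.recv(1024).split(b"\r\n", 1)[0]
        finally:
            sock.close()

    pool = eventlet.GreenPool()
    for line in pool.imap(_fetch_status_line, ["example.com", "example.org"]):
        print(line)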


@@ -0,0 +1,219 @@
import _pyio as _original_pyio
import errno
import os as _original_os
import socket as _original_socket
from io import (
BufferedRandom as _OriginalBufferedRandom,
BufferedReader as _OriginalBufferedReader,
BufferedWriter as _OriginalBufferedWriter,
DEFAULT_BUFFER_SIZE,
TextIOWrapper as _OriginalTextIOWrapper,
IOBase as _OriginalIOBase,
)
from types import FunctionType
from eventlet.greenio.base import (
_operation_on_closed_file,
greenpipe_doc,
set_nonblocking,
SOCKET_BLOCKING,
)
from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
from eventlet.support import get_errno
__all__ = ['_fileobject', 'GreenPipe']
# TODO get rid of this, it only seems like the original _fileobject
_fileobject = _original_socket.SocketIO
# Large part of the following code is copied from the original
# eventlet.greenio module
class GreenFileIO(_OriginalIOBase):
def __init__(self, name, mode='r', closefd=True, opener=None):
if isinstance(name, int):
fileno = name
self._name = "<fd:%d>" % fileno
else:
assert isinstance(name, str)
with open(name, mode) as fd:
self._name = fd.name
fileno = _original_os.dup(fd.fileno())
notify_opened(fileno)
self._fileno = fileno
self._mode = mode
self._closed = False
set_nonblocking(self)
self._seekable = None
@property
def closed(self):
return self._closed
def seekable(self):
if self._seekable is None:
try:
_original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
except OSError as e:
if get_errno(e) == errno.ESPIPE:
self._seekable = False
else:
raise
else:
self._seekable = True
return self._seekable
def readable(self):
return 'r' in self._mode or '+' in self._mode
def writable(self):
return 'w' in self._mode or '+' in self._mode or 'a' in self._mode
def fileno(self):
return self._fileno
def read(self, size=-1):
if size == -1:
return self.readall()
while True:
try:
return _original_os.read(self._fileno, size)
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise OSError(*e.args)
self._trampoline(self, read=True)
def readall(self):
buf = []
while True:
try:
chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
if chunk == b'':
return b''.join(buf)
buf.append(chunk)
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise OSError(*e.args)
self._trampoline(self, read=True)
def readinto(self, b):
up_to = len(b)
data = self.read(up_to)
bytes_read = len(data)
b[:bytes_read] = data
return bytes_read
def isatty(self):
try:
return _original_os.isatty(self.fileno())
except OSError as e:
raise OSError(*e.args)
def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
if self._closed:
# Don't trampoline if we're already closed.
raise IOClosed()
try:
return trampoline(fd, read=read, write=write, timeout=timeout,
timeout_exc=timeout_exc,
mark_as_closed=self._mark_as_closed)
except IOClosed:
# Our fileno has been obsoleted. Defang ourselves to
# prevent spurious closes.
self._mark_as_closed()
raise
def _mark_as_closed(self):
""" Mark this socket as being closed """
self._closed = True
def write(self, data):
view = memoryview(data)
datalen = len(data)
offset = 0
while offset < datalen:
try:
written = _original_os.write(self._fileno, view[offset:])
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise OSError(*e.args)
trampoline(self, write=True)
else:
offset += written
return offset
def close(self):
if not self._closed:
self._closed = True
_original_os.close(self._fileno)
notify_close(self._fileno)
for method in [
'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
setattr(self, method, _operation_on_closed_file)
def truncate(self, size=-1):
if size is None:
size = -1
if size == -1:
size = self.tell()
try:
rv = _original_os.ftruncate(self._fileno, size)
except OSError as e:
raise OSError(*e.args)
else:
self.seek(size)  # move position & clear buffer
return rv
def seek(self, offset, whence=_original_os.SEEK_SET):
try:
return _original_os.lseek(self._fileno, offset, whence)
except OSError as e:
raise OSError(*e.args)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
_open_environment = dict(globals())
_open_environment.update(dict(
BufferedRandom=_OriginalBufferedRandom,
BufferedWriter=_OriginalBufferedWriter,
BufferedReader=_OriginalBufferedReader,
TextIOWrapper=_OriginalTextIOWrapper,
FileIO=GreenFileIO,
os=_original_os,
))
if hasattr(_original_pyio, 'text_encoding'):
_open_environment['text_encoding'] = _original_pyio.text_encoding
_pyio_open = getattr(_original_pyio.open, '__wrapped__', _original_pyio.open)
_open = FunctionType(
_pyio_open.__code__,
_open_environment,
)
def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
try:
fileno = name.fileno()
except AttributeError:
pass
else:
fileno = _original_os.dup(fileno)
name.close()
name = fileno
return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)
GreenPipe.__doc__ = greenpipe_doc
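# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# GreenPipe wraps a pipe file descriptor in GreenFileIO plus the usual io
# buffering, so a read cooperatively yields until a writer greenthread has
# supplied data.
if __name__ == "__main__":
    import eventlet

    r_fd, w_fd = _original_os.pipe()
    reader = GreenPipe(r_fd, "rb")
    writer = GreenPipe(w_fd, "wb")

    def _produce():
        writer.write(b"hello from a greenthread\n")
        writer.close()                 # closing flushes the buffered writer

    eventlet.spawn_n(_produce)
    print(reader.readline())           # yields to the hub until data arrives
    reader.close()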


@@ -0,0 +1,256 @@
import traceback
import eventlet
from eventlet import queue
from eventlet.support import greenlets as greenlet
__all__ = ['GreenPool', 'GreenPile']
DEBUG = True
class GreenPool:
"""The GreenPool class is a pool of green threads.
"""
def __init__(self, size=1000):
try:
size = int(size)
except ValueError as e:
msg = 'GreenPool() expect size :: int, actual: {} {}'.format(type(size), str(e))
raise TypeError(msg)
if size < 0:
msg = 'GreenPool() expect size >= 0, actual: {}'.format(repr(size))
raise ValueError(msg)
self.size = size
self.coroutines_running = set()
self.sem = eventlet.Semaphore(size)
self.no_coros_running = eventlet.Event()
def resize(self, new_size):
""" Change the max number of greenthreads doing work at any given time.
If resize is called when there are more than *new_size* greenthreads
already working on tasks, they will be allowed to complete but no new
tasks will be allowed to get launched until enough greenthreads finish
their tasks to drop the overall quantity below *new_size*. Until
then, the return value of free() will be negative.
"""
size_delta = new_size - self.size
self.sem.counter += size_delta
self.size = new_size
def running(self):
""" Returns the number of greenthreads that are currently executing
functions in the GreenPool."""
return len(self.coroutines_running)
def free(self):
""" Returns the number of greenthreads available for use.
If zero or less, the next call to :meth:`spawn` or :meth:`spawn_n` will
block the calling greenthread until a slot becomes available."""
return self.sem.counter
def spawn(self, function, *args, **kwargs):
"""Run the *function* with its arguments in its own green thread.
Returns the :class:`GreenThread <eventlet.GreenThread>`
object that is running the function, which can be used to retrieve the
results.
If the pool is currently at capacity, ``spawn`` will block until one of
the running greenthreads completes its task and frees up a slot.
This function is reentrant; *function* can call ``spawn`` on the same
pool without risk of deadlocking the whole thing.
"""
# if reentering an empty pool, don't try to wait on a coroutine freeing
# itself -- instead, just execute in the current coroutine
current = eventlet.getcurrent()
if self.sem.locked() and current in self.coroutines_running:
# a bit hacky to use the GT without switching to it
gt = eventlet.greenthread.GreenThread(current)
gt.main(function, args, kwargs)
return gt
else:
self.sem.acquire()
gt = eventlet.spawn(function, *args, **kwargs)
if not self.coroutines_running:
self.no_coros_running = eventlet.Event()
self.coroutines_running.add(gt)
gt.link(self._spawn_done)
return gt
def _spawn_n_impl(self, func, args, kwargs, coro):
try:
try:
func(*args, **kwargs)
except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit):
raise
except:
if DEBUG:
traceback.print_exc()
finally:
if coro is None:
return
else:
coro = eventlet.getcurrent()
self._spawn_done(coro)
def spawn_n(self, function, *args, **kwargs):
"""Create a greenthread to run the *function*, the same as
:meth:`spawn`. The difference is that :meth:`spawn_n` returns
None; the results of *function* are not retrievable.
"""
# if reentering an empty pool, don't try to wait on a coroutine freeing
# itself -- instead, just execute in the current coroutine
current = eventlet.getcurrent()
if self.sem.locked() and current in self.coroutines_running:
self._spawn_n_impl(function, args, kwargs, None)
else:
self.sem.acquire()
g = eventlet.spawn_n(
self._spawn_n_impl,
function, args, kwargs, True)
if not self.coroutines_running:
self.no_coros_running = eventlet.Event()
self.coroutines_running.add(g)
def waitall(self):
"""Waits until all greenthreads in the pool are finished working."""
assert eventlet.getcurrent() not in self.coroutines_running, \
"Calling waitall() from within one of the " \
"GreenPool's greenthreads will never terminate."
if self.running():
self.no_coros_running.wait()
def _spawn_done(self, coro):
self.sem.release()
if coro is not None:
self.coroutines_running.remove(coro)
# if done processing (no more work is waiting for processing),
# we can finish off any waitall() calls that might be pending
if self.sem.balance == self.size:
self.no_coros_running.send(None)
def waiting(self):
"""Return the number of greenthreads waiting to spawn.
"""
if self.sem.balance < 0:
return -self.sem.balance
else:
return 0
def _do_map(self, func, it, gi):
for args in it:
gi.spawn(func, *args)
gi.done_spawning()
def starmap(self, function, iterable):
"""This is the same as :func:`itertools.starmap`, except that *func* is
executed in a separate green thread for each item, with the concurrency
limited by the pool's size. In operation, starmap consumes a constant
amount of memory, proportional to the size of the pool, and is thus
suited for iterating over extremely long input lists.
"""
if function is None:
function = lambda *a: a
# We use a whole separate greenthread so its spawn() calls can block
# without blocking OUR caller. On the other hand, we must assume that
# our caller will immediately start trying to iterate over whatever we
# return. If that were a GreenPile, our caller would always see an
# empty sequence because the hub hasn't even entered _do_map() yet --
# _do_map() hasn't had a chance to spawn a single greenthread on this
# GreenPool! A GreenMap is safe to use with different producer and
# consumer greenthreads, because it doesn't raise StopIteration until
# the producer has explicitly called done_spawning().
gi = GreenMap(self.size)
eventlet.spawn_n(self._do_map, function, iterable, gi)
return gi
def imap(self, function, *iterables):
"""This is the same as :func:`itertools.imap`, and has the same
concurrency and memory behavior as :meth:`starmap`.
It's quite convenient for, e.g., farming out jobs from a file::
def worker(line):
return do_something(line)
pool = GreenPool()
for result in pool.imap(worker, open("filename", 'r')):
print(result)
"""
return self.starmap(function, zip(*iterables))
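# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# spawn()/spawn_n() block once the pool is at capacity, so a tight submission
# loop throttles itself to at most `size` concurrent greenthreads; waitall()
# then blocks until every task has finished.
if __name__ == "__main__":
    def _task(i):
        eventlet.sleep(0.01)           # stand-in for real I/O
        print("task", i, "done")

    pool = GreenPool(size=2)
    for i in range(6):
        pool.spawn_n(_task, i)         # at most two run at any moment
    pool.waitall()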
class GreenPile:
"""GreenPile is an abstraction representing a bunch of I/O-related tasks.
Construct a GreenPile with an existing GreenPool object. The GreenPile will
then use that pool's concurrency as it processes its jobs. There can be
many GreenPiles associated with a single GreenPool.
A GreenPile can also be constructed standalone, not associated with any
GreenPool. To do this, construct it with an integer size parameter instead
of a GreenPool.
It is not advisable to iterate over a GreenPile in a different greenthread
than the one which is calling spawn. The iterator will exit early in that
situation.
"""
def __init__(self, size_or_pool=1000):
if isinstance(size_or_pool, GreenPool):
self.pool = size_or_pool
else:
self.pool = GreenPool(size_or_pool)
self.waiters = queue.LightQueue()
self.counter = 0
def spawn(self, func, *args, **kw):
"""Runs *func* in its own green thread, with the result available by
iterating over the GreenPile object."""
self.counter += 1
try:
gt = self.pool.spawn(func, *args, **kw)
self.waiters.put(gt)
except:
self.counter -= 1
raise
def __iter__(self):
return self
def next(self):
"""Wait for the next result, suspending the current greenthread until it
is available. Raises StopIteration when there are no more results."""
if self.counter == 0:
raise StopIteration()
return self._next()
__next__ = next
def _next(self):
try:
return self.waiters.get().wait()
finally:
self.counter -= 1
# this is identical to GreenPile but it blocks on spawn if the results
# aren't consumed, and it doesn't generate its own StopIteration exception,
# instead relying on the spawning process to send one in when it's done
class GreenMap(GreenPile):
def __init__(self, size_or_pool):
super().__init__(size_or_pool)
self.waiters = queue.LightQueue(maxsize=self.pool.size)
def done_spawning(self):
self.spawn(lambda: StopIteration())
def next(self):
val = self._next()
if isinstance(val, StopIteration):
raise val
else:
return val
__next__ = next
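# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# A GreenPile shares its pool's concurrency and, when iterated from the same
# greenthread that called spawn(), yields results in spawn order.
if __name__ == "__main__":
    def _square(n):
        eventlet.sleep(0)              # cooperative yield, pretend I/O
        return n * n

    pile = GreenPile(GreenPool(4))
    for n in range(10):
        pile.spawn(_square, n)
    print(list(pile))                  # [0, 1, 4, 9, ...] in spawn order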


@@ -0,0 +1,346 @@
from collections import deque
import sys
from greenlet import GreenletExit
from eventlet import event
from eventlet import hubs
from eventlet import support
from eventlet import timeout
from eventlet.hubs import timer
from eventlet.support import greenlets as greenlet
import warnings
__all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n',
'kill',
'spawn_after', 'spawn_after_local', 'GreenThread']
getcurrent = greenlet.getcurrent
def sleep(seconds=0):
"""Yield control to another eligible coroutine until at least *seconds* have
elapsed.
*seconds* may be specified as an integer, or a float if fractional seconds
are desired. Calling :func:`~greenthread.sleep` with *seconds* of 0 is the
canonical way of expressing a cooperative yield. For example, if one is
looping over a large list performing an expensive calculation without
calling any socket methods, it's a good idea to call ``sleep(0)``
occasionally; otherwise nothing else will run.
"""
hub = hubs.get_hub()
current = getcurrent()
if hub.greenlet is current:
raise RuntimeError('do not call blocking functions from the mainloop')
timer = hub.schedule_call_global(seconds, current.switch)
try:
hub.switch()
finally:
timer.cancel()
def spawn(func, *args, **kwargs):
"""Create a greenthread to run ``func(*args, **kwargs)``. Returns a
:class:`GreenThread` object which you can use to get the results of the
call.
Execution control returns immediately to the caller; the created greenthread
is merely scheduled to be run at the next available opportunity.
Use :func:`spawn_after` to arrange for greenthreads to be spawned
after a finite delay.
"""
hub = hubs.get_hub()
g = GreenThread(hub.greenlet)
hub.schedule_call_global(0, g.switch, func, args, kwargs)
return g
def spawn_n(func, *args, **kwargs):
"""Same as :func:`spawn`, but returns a ``greenlet`` object from
which it is not possible to retrieve either a return value or
whether it raised any exceptions. This is faster than
:func:`spawn`; it is fastest if there are no keyword arguments.
If an exception is raised in the function, spawn_n prints a stack
trace; the print can be disabled by calling
:func:`eventlet.debug.hub_exceptions` with False.
"""
return _spawn_n(0, func, args, kwargs)[1]
def spawn_after(seconds, func, *args, **kwargs):
"""Spawns *func* after *seconds* have elapsed. It runs as scheduled even if
the current greenthread has completed.
*seconds* may be specified as an integer, or a float if fractional seconds
are desired. The *func* will be called with the given *args* and
keyword arguments *kwargs*, and will be executed within its own greenthread.
The return value of :func:`spawn_after` is a :class:`GreenThread` object,
which can be used to retrieve the results of the call.
To cancel the spawn and prevent *func* from being called,
call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`.
This will not abort the function if it's already started running, which is
generally the desired behavior. If terminating *func* regardless of whether
it's started or not is the desired behavior, call :meth:`GreenThread.kill`.
"""
hub = hubs.get_hub()
g = GreenThread(hub.greenlet)
hub.schedule_call_global(seconds, g.switch, func, args, kwargs)
return g
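# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# spawn_after schedules the greenthread for later; cancel() is a no-op once
# the function has started, which is why it is safe to call unconditionally.
if __name__ == "__main__":
    import eventlet

    gt = eventlet.spawn_after(0.2, print, "never printed")
    gt.cancel()                        # still pending, so the call is dropped
    eventlet.sleep(0.3)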
def spawn_after_local(seconds, func, *args, **kwargs):
"""Spawns *func* after *seconds* have elapsed. The function will NOT be
called if the current greenthread has exited.
*seconds* may be specified as an integer, or a float if fractional seconds
are desired. The *func* will be called with the given *args* and
keyword arguments *kwargs*, and will be executed within its own greenthread.
The return value of :func:`spawn_after_local` is a :class:`GreenThread` object,
which can be used to retrieve the results of the call.
To cancel the spawn and prevent *func* from being called,
call :meth:`GreenThread.cancel` on the return value. This will not abort the
function if it's already started running. If terminating *func* regardless
of whether it's started or not is the desired behavior, call
:meth:`GreenThread.kill`.
"""
hub = hubs.get_hub()
g = GreenThread(hub.greenlet)
hub.schedule_call_local(seconds, g.switch, func, args, kwargs)
return g
def call_after_global(seconds, func, *args, **kwargs):
warnings.warn(
"call_after_global is renamed to spawn_after, which"
"has the same signature and semantics (plus a bit extra). Please do a"
" quick search-and-replace on your codebase, thanks!",
DeprecationWarning, stacklevel=2)
return _spawn_n(seconds, func, args, kwargs)[0]
def call_after_local(seconds, function, *args, **kwargs):
warnings.warn(
"call_after_local is renamed to spawn_after_local, which"
"has the same signature and semantics (plus a bit extra).",
DeprecationWarning, stacklevel=2)
hub = hubs.get_hub()
g = greenlet.greenlet(function, parent=hub.greenlet)
t = hub.schedule_call_local(seconds, g.switch, *args, **kwargs)
return t
call_after = call_after_local
def exc_after(seconds, *throw_args):
warnings.warn("Instead of exc_after, which is deprecated, use "
"Timeout(seconds, exception)",
DeprecationWarning, stacklevel=2)
if seconds is None: # dummy argument, do nothing
return timer.Timer(seconds, lambda: None)
hub = hubs.get_hub()
return hub.schedule_call_local(seconds, getcurrent().throw, *throw_args)
# deprecate, remove
TimeoutError, with_timeout = (
support.wrap_deprecated(old, new)(fun) for old, new, fun in (
('greenthread.TimeoutError', 'Timeout', timeout.Timeout),
('greenthread.with_timeout', 'with_timeout', timeout.with_timeout),
))
def _spawn_n(seconds, func, args, kwargs):
hub = hubs.get_hub()
g = greenlet.greenlet(func, parent=hub.greenlet)
t = hub.schedule_call_global(seconds, g.switch, *args, **kwargs)
return t, g
class GreenThread(greenlet.greenlet):
"""The GreenThread class is a type of Greenlet which has the additional
property of being able to retrieve the return value of the main function.
Do not construct GreenThread objects directly; call :func:`spawn` to get one.
"""
def __init__(self, parent):
greenlet.greenlet.__init__(self, self.main, parent)
self._exit_event = event.Event()
self._resolving_links = False
self._exit_funcs = None
def __await__(self):
"""
Enable ``GreenThread``s to be ``await``ed in ``async`` functions.
"""
from eventlet.hubs.asyncio import Hub
hub = hubs.get_hub()
if not isinstance(hub, Hub):
raise RuntimeError(
"This API only works with eventlet's asyncio hub. "
+ "To use it, set an EVENTLET_HUB=asyncio environment variable."
)
future = hub.loop.create_future()
# When the Future finishes, check if it was due to cancellation:
def got_future_result(future):
if future.cancelled() and not self.dead:
# GreenThread is still running, so kill it:
self.kill()
future.add_done_callback(got_future_result)
# When the GreenThread finishes, set its result on the Future:
def got_gthread_result(gthread):
if future.done():
# Can't set values any more.
return
try:
# Should return immediately:
result = gthread.wait()
future.set_result(result)
except GreenletExit:
future.cancel()
except BaseException as e:
future.set_exception(e)
self.link(got_gthread_result)
return future.__await__()
def wait(self):
""" Returns the result of the main function of this GreenThread. If the
result is a normal return value, :meth:`wait` returns it. If it raised
an exception, :meth:`wait` will raise the same exception (though the
stack trace will unavoidably contain some frames from within the
greenthread module)."""
return self._exit_event.wait()
def link(self, func, *curried_args, **curried_kwargs):
""" Set up a function to be called with the results of the GreenThread.
The function must have the following signature::
def func(gt, [curried args/kwargs]):
When the GreenThread finishes its run, it calls *func* with itself
and with the `curried arguments <http://en.wikipedia.org/wiki/Currying>`_ supplied
at link-time. If the function wants to retrieve the result of the GreenThread,
it should call wait() on its first argument.
Note that *func* is called within execution context of
the GreenThread, so it is possible to interfere with other linked
functions by doing things like switching explicitly to another
greenthread.
"""
if self._exit_funcs is None:
self._exit_funcs = deque()
self._exit_funcs.append((func, curried_args, curried_kwargs))
if self._exit_event.ready():
self._resolve_links()
def unlink(self, func, *curried_args, **curried_kwargs):
""" remove linked function set by :meth:`link`
Remove successfully return True, otherwise False
"""
if not self._exit_funcs:
return False
try:
self._exit_funcs.remove((func, curried_args, curried_kwargs))
return True
except ValueError:
return False
def main(self, function, args, kwargs):
try:
result = function(*args, **kwargs)
except:
self._exit_event.send_exception(*sys.exc_info())
self._resolve_links()
raise
else:
self._exit_event.send(result)
self._resolve_links()
def _resolve_links(self):
# ca and ckw are the curried function arguments
if self._resolving_links:
return
if not self._exit_funcs:
return
self._resolving_links = True
try:
while self._exit_funcs:
f, ca, ckw = self._exit_funcs.popleft()
f(self, *ca, **ckw)
finally:
self._resolving_links = False
def kill(self, *throw_args):
"""Kills the greenthread using :func:`kill`. After being killed
all calls to :meth:`wait` will raise *throw_args* (which default
to :class:`greenlet.GreenletExit`)."""
return kill(self, *throw_args)
def cancel(self, *throw_args):
"""Kills the greenthread using :func:`kill`, but only if it hasn't
already started running. After being canceled,
all calls to :meth:`wait` will raise *throw_args* (which default
to :class:`greenlet.GreenletExit`)."""
return cancel(self, *throw_args)
def cancel(g, *throw_args):
"""Like :func:`kill`, but only terminates the greenthread if it hasn't
already started execution. If the greenthread has already started
execution, :func:`cancel` has no effect."""
if not g:
kill(g, *throw_args)
def kill(g, *throw_args):
"""Terminates the target greenthread by raising an exception into it.
Whatever that greenthread might be doing, be it waiting for I/O or another
primitive, it sees the exception right away.
By default, this exception is GreenletExit, but a specific exception
may be specified. *throw_args* should be the same as the arguments to
raise; either an exception instance or an exc_info tuple.
Calling :func:`kill` causes the calling greenthread to cooperatively yield.
"""
if g.dead:
return
hub = hubs.get_hub()
if not g:
# greenlet hasn't started yet and therefore throw won't work
# on its own; semantically we want it to be as though the main
# method never got called
def just_raise(*a, **kw):
if throw_args:
raise throw_args[1].with_traceback(throw_args[2])
else:
raise greenlet.GreenletExit()
g.run = just_raise
if isinstance(g, GreenThread):
# it's a GreenThread object, so we want to call its main
# method to take advantage of the notification
try:
g.main(just_raise, (), {})
except:
pass
current = getcurrent()
if current is not hub.greenlet:
# arrange to wake the caller back up immediately
hub.ensure_greenlet()
hub.schedule_call_global(0, current.switch)
g.throw(*throw_args)
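# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# link() fires inside the finished GreenThread's context; kill() raises
# GreenletExit into a running greenthread and yields so it runs right away.
if __name__ == "__main__":
    gt = spawn(lambda: "done")
    gt.link(lambda g: print("linked callback saw:", g.wait()))
    print(gt.wait())                   # -> done (the callback already ran)

    victim = spawn(sleep, 60)
    sleep(0)                           # let the victim start waiting
    kill(victim)                       # GreenletExit delivered immediately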


@@ -0,0 +1,188 @@
import importlib
import inspect
import os
import warnings
from eventlet import patcher
from eventlet.support import greenlets as greenlet
__all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]
threading = patcher.original('threading')
_threadlocal = threading.local()
# order is important, get_default_hub returns first available from here
builtin_hub_names = ('epolls', 'kqueue', 'poll', 'selects')
builtin_hub_modules = tuple(importlib.import_module('eventlet.hubs.' + name) for name in builtin_hub_names)
class HubError(Exception):
pass
def get_default_hub():
"""Select the default hub implementation based on what multiplexing
libraries are installed. The order that the hubs are tried is:
* epoll
* kqueue
* poll
* select
.. include:: ../../doc/source/common.txt
.. note :: |internal|
"""
for mod in builtin_hub_modules:
if mod.is_available():
return mod
raise HubError('no built-in hubs are available: {}'.format(builtin_hub_modules))
def use_hub(mod=None):
"""Use the module *mod*, containing a class called Hub, as the
event hub. Usually not required; the default hub is usually fine.
`mod` can be an actual hub class, a module, a string, or None.
If `mod` is a class, use it directly.
If `mod` is a module, use `module.Hub` class
If `mod` is a string and contains either '.' or ':'
then `use_hub` uses 'package.subpackage.module:Class' convention,
otherwise imports `eventlet.hubs.mod`.
If `mod` is None, `use_hub` uses the default hub.
Only call use_hub during application initialization,
because it resets the hub's state and any existing
timers or listeners will never be resumed.
These two threadlocal attributes are not part of Eventlet public API:
- `threadlocal.Hub` (capital H) is hub constructor, used when no hub is currently active
- `threadlocal.hub` (lowercase h) is active hub instance
"""
if mod is None:
mod = os.environ.get('EVENTLET_HUB', None)
if mod is None:
mod = get_default_hub()
if hasattr(_threadlocal, 'hub'):
del _threadlocal.hub
classname = ''
if isinstance(mod, str):
if mod.strip() == "":
raise RuntimeError("Need to specify a hub")
if '.' in mod or ':' in mod:
modulename, _, classname = mod.strip().partition(':')
else:
modulename = 'eventlet.hubs.' + mod
mod = importlib.import_module(modulename)
if hasattr(mod, 'is_available'):
if not mod.is_available():
raise Exception('selected hub is not available on this system mod={}'.format(mod))
else:
msg = '''Please provide `is_available()` function in your custom Eventlet hub {mod}.
It must return bool: whether hub supports current platform. See eventlet/hubs/{{epoll,kqueue}} for example.
'''.format(mod=mod)
warnings.warn(msg, DeprecationWarning, stacklevel=3)
hubclass = mod
if not inspect.isclass(mod):
hubclass = getattr(mod, classname or 'Hub')
_threadlocal.Hub = hubclass
def get_hub():
"""Get the current event hub singleton object.
.. note :: |internal|
"""
try:
hub = _threadlocal.hub
except AttributeError:
try:
_threadlocal.Hub
except AttributeError:
use_hub()
hub = _threadlocal.hub = _threadlocal.Hub()
return hub
# Lame middle file import because complex dependencies in import graph
from eventlet import timeout
def trampoline(fd, read=None, write=None, timeout=None,
timeout_exc=timeout.Timeout,
mark_as_closed=None):
"""Suspend the current coroutine until the given socket object or file
descriptor is ready to *read*, ready to *write*, or the specified
*timeout* elapses, depending on arguments specified.
To wait for *fd* to be ready to read, pass *read* ``=True``; ready to
write, pass *write* ``=True``. To specify a timeout, pass the *timeout*
argument in seconds.
If the specified *timeout* elapses before the socket is ready to read or
write, *timeout_exc* will be raised instead of ``trampoline()``
returning normally.
.. note :: |internal|
"""
t = None
hub = get_hub()
current = greenlet.getcurrent()
if hub.greenlet is current:
raise RuntimeError('do not call blocking functions from the mainloop')
if (read and write):
raise RuntimeError('not allowed to trampoline for reading and writing')
try:
fileno = fd.fileno()
except AttributeError:
fileno = fd
if timeout is not None:
def _timeout(exc):
# This is only useful to insert debugging
current.throw(exc)
t = hub.schedule_call_global(timeout, _timeout, timeout_exc)
try:
if read:
listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
elif write:
listener = hub.add(hub.WRITE, fileno, current.switch, current.throw, mark_as_closed)
try:
return hub.switch()
finally:
hub.remove(listener)
finally:
if t is not None:
t.cancel()
def notify_close(fd):
"""
A particular file descriptor has been explicitly closed. Register for any
waiting listeners to be notified on the next run loop.
"""
hub = get_hub()
hub.notify_close(fd)
def notify_opened(fd):
"""
Some file descriptors may be closed 'silently' - that is, by the garbage
collector, by an external library, etc. When the OS returns a file descriptor
from an open call (or something similar), this may be the only indication we
have that the FD has been closed and then recycled.
We let the hub know that the old file descriptor is dead; any stuck listeners
will be disabled and notified in turn.
"""
hub = get_hub()
hub.mark_as_reopened(fd)
class IOClosed(IOError):
pass
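# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# Hubs are selected once, early: either export EVENTLET_HUB=<name> before
# importing eventlet, or call use_hub() during application initialization.
if __name__ == "__main__":
    use_hub("selects")                 # force the select()-based hub
    print(type(get_hub()).__module__)  # -> eventlet.hubs.selects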


@@ -0,0 +1,168 @@
"""
Asyncio-based hub, originally implemented by Miguel Grinberg.
"""
import asyncio
try:
import concurrent.futures.thread
concurrent_imported = True
except RuntimeError:
# This happens in weird edge cases where asyncio hub is started at
# shutdown. Not much we can do if this happens.
concurrent_imported = False
import os
import sys
from eventlet.hubs import hub
from eventlet.patcher import original
def is_available():
"""
Indicate whether this hub is available, since some hubs are
platform-specific.
Python always has asyncio, so this is always ``True``.
"""
return True
class Hub(hub.BaseHub):
"""An Eventlet hub implementation on top of an asyncio event loop."""
def __init__(self):
super().__init__()
# Make sure asyncio thread pools use real threads:
if concurrent_imported:
concurrent.futures.thread.threading = original("threading")
concurrent.futures.thread.queue = original("queue")
# Make sure select/poll/epoll/kqueue are usable by asyncio:
import selectors
selectors.select = original("select")
# Make sure DNS lookups use normal blocking API (which asyncio will run
# in a thread):
import asyncio.base_events
asyncio.base_events.socket = original("socket")
# The presumption is that eventlet is driving the event loop, so we
# want a new one we control.
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.sleep_event = asyncio.Event()
def add_timer(self, timer):
"""
Register a ``Timer``.
Typically not called directly by users.
"""
super().add_timer(timer)
self.sleep_event.set()
def _file_cb(self, cb, fileno):
"""
Callback called by ``asyncio`` when a file descriptor has an event.
"""
try:
cb(fileno)
except self.SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())
self.sleep_event.set()
def add(self, evtype, fileno, cb, tb, mark_as_closed):
"""
Add a file descriptor of given event type to the ``Hub``. See the
superclass for details.
Typically not called directly by users.
"""
try:
os.fstat(fileno)
except OSError:
raise ValueError('Invalid file descriptor')
already_listening = self.listeners[evtype].get(fileno) is not None
listener = super().add(evtype, fileno, cb, tb, mark_as_closed)
if not already_listening:
if evtype == hub.READ:
self.loop.add_reader(fileno, self._file_cb, cb, fileno)
else:
self.loop.add_writer(fileno, self._file_cb, cb, fileno)
return listener
def remove(self, listener):
"""
Remove a listener from the ``Hub``. See the superclass for details.
Typically not called directly by users.
"""
super().remove(listener)
evtype = listener.evtype
fileno = listener.fileno
if not self.listeners[evtype].get(fileno):
if evtype == hub.READ:
self.loop.remove_reader(fileno)
else:
self.loop.remove_writer(fileno)
def remove_descriptor(self, fileno):
"""
Remove a file descriptor from the ``asyncio`` loop.
Typically not called directly by users.
"""
have_read = self.listeners[hub.READ].get(fileno)
have_write = self.listeners[hub.WRITE].get(fileno)
super().remove_descriptor(fileno)
if have_read:
self.loop.remove_reader(fileno)
if have_write:
self.loop.remove_writer(fileno)
def run(self, *a, **kw):
"""
Start the ``Hub`` running. See the superclass for details.
"""
async def async_run():
if self.running:
raise RuntimeError("Already running!")
try:
self.running = True
self.stopping = False
while not self.stopping:
while self.closed:
# We ditch all of these first.
self.close_one()
self.prepare_timers()
if self.debug_blocking:
self.block_detect_pre()
self.fire_timers(self.clock())
if self.debug_blocking:
self.block_detect_post()
self.prepare_timers()
wakeup_when = self.sleep_until()
if wakeup_when is None:
sleep_time = self.default_sleep()
else:
sleep_time = wakeup_when - self.clock()
if sleep_time > 0:
try:
await asyncio.wait_for(self.sleep_event.wait(),
sleep_time)
except asyncio.TimeoutError:
pass
self.sleep_event.clear()
else:
await asyncio.sleep(0)
else:
self.timers_canceled = 0
del self.timers[:]
del self.next_timers[:]
finally:
self.running = False
self.stopping = False
self.loop.run_until_complete(async_run())
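# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# With this hub active (EVENTLET_HUB=asyncio set before importing eventlet),
# asyncio coroutines and greenthreads can interoperate; here a coroutine is
# wrapped as a GreenThread via eventlet.asyncio.spawn_for_awaitable.
if __name__ == "__main__":
    from eventlet.asyncio import spawn_for_awaitable

    async def _answer():
        await asyncio.sleep(0.1)
        return 42

    gt = spawn_for_awaitable(_answer())   # GreenThread wrapping the coroutine
    print(gt.wait())                      # -> 42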


@@ -0,0 +1,31 @@
import errno
from eventlet import patcher, support
from eventlet.hubs import hub, poll
select = patcher.original('select')
def is_available():
return hasattr(select, 'epoll')
# NOTE: we rely on the fact that the epoll flag constants
# are identical in value to the poll constants
class Hub(poll.Hub):
def __init__(self, clock=None):
super().__init__(clock=clock)
self.poll = select.epoll()
def add(self, evtype, fileno, cb, tb, mac):
oldlisteners = bool(self.listeners[self.READ].get(fileno) or
self.listeners[self.WRITE].get(fileno))
# not super() to avoid double register()
listener = hub.BaseHub.add(self, evtype, fileno, cb, tb, mac)
try:
self.register(fileno, new=not oldlisteners)
except OSError as ex: # ignore EEXIST, #80
if support.get_errno(ex) != errno.EEXIST:
raise
return listener
def do_poll(self, seconds):
return self.poll.poll(seconds)


@@ -0,0 +1,495 @@
import errno
import heapq
import math
import signal
import sys
import traceback
arm_alarm = None
if hasattr(signal, 'setitimer'):
def alarm_itimer(seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
arm_alarm = alarm_itimer
else:
try:
import itimer
arm_alarm = itimer.alarm
except ImportError:
def alarm_signal(seconds):
signal.alarm(math.ceil(seconds))
arm_alarm = alarm_signal
import eventlet.hubs
from eventlet.hubs import timer
from eventlet.support import greenlets as greenlet
try:
from monotonic import monotonic
except ImportError:
from time import monotonic
g_prevent_multiple_readers = True
READ = "read"
WRITE = "write"
def closed_callback(fileno):
""" Used to de-fang a callback that may be triggered by a loop in BaseHub.wait
"""
# No-op.
pass
class FdListener:
def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
""" The following are required:
cb - the standard callback, which will switch into the
listening greenlet to indicate that the event waited upon
is ready
tb - a 'throwback'. This is typically greenlet.throw, used
to raise a signal into the target greenlet indicating that
an event was obsoleted by its underlying filehandle being
repurposed.
mark_as_closed - if any listener is obsoleted, this is called
(in the context of some other client greenlet) to alert
underlying filehandle-wrapping objects that they've been
closed.
"""
assert (evtype is READ or evtype is WRITE)
self.evtype = evtype
self.fileno = fileno
self.cb = cb
self.tb = tb
self.mark_as_closed = mark_as_closed
self.spent = False
self.greenlet = greenlet.getcurrent()
def __repr__(self):
return "%s(%r, %r, %r, %r)" % (type(self).__name__, self.evtype, self.fileno,
self.cb, self.tb)
__str__ = __repr__
def defang(self):
self.cb = closed_callback
if self.mark_as_closed is not None:
self.mark_as_closed()
self.spent = True
noop = FdListener(READ, 0, lambda x: None, lambda x: None, None)
# in debug mode, track the call site that created the listener
class DebugListener(FdListener):
def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
self.where_called = traceback.format_stack()
self.greenlet = greenlet.getcurrent()
super().__init__(evtype, fileno, cb, tb, mark_as_closed)
def __repr__(self):
return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % (
self.evtype,
self.fileno,
self.cb,
self.tb,
self.mark_as_closed,
self.greenlet,
''.join(self.where_called))
__str__ = __repr__
def alarm_handler(signum, frame):
import inspect
raise RuntimeError("Blocking detector ALARMED at" + str(inspect.getframeinfo(frame)))
class BaseHub:
""" Base hub class for easing the implementation of subclasses that are
specific to a particular underlying event architecture. """
SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
READ = READ
WRITE = WRITE
def __init__(self, clock=None):
self.listeners = {READ: {}, WRITE: {}}
self.secondaries = {READ: {}, WRITE: {}}
self.closed = []
if clock is None:
clock = monotonic
self.clock = clock
self.greenlet = greenlet.greenlet(self.run)
self.stopping = False
self.running = False
self.timers = []
self.next_timers = []
self.lclass = FdListener
self.timers_canceled = 0
self.debug_exceptions = True
self.debug_blocking = False
self.debug_blocking_resolution = 1
def block_detect_pre(self):
# shortest alarm we can possibly raise is one second
tmp = signal.signal(signal.SIGALRM, alarm_handler)
if tmp != alarm_handler:
self._old_signal_handler = tmp
arm_alarm(self.debug_blocking_resolution)
def block_detect_post(self):
if (hasattr(self, "_old_signal_handler") and
self._old_signal_handler):
signal.signal(signal.SIGALRM, self._old_signal_handler)
signal.alarm(0)
def add(self, evtype, fileno, cb, tb, mark_as_closed):
""" Signals an intent to or write a particular file descriptor.
The *evtype* argument is either the constant READ or WRITE.
The *fileno* argument is the file number of the file of interest.
The *cb* argument is the callback which will be called when the file
is ready for reading/writing.
The *tb* argument is the throwback used to signal (into the greenlet)
that the file was closed.
The *mark_as_closed* is used in the context of the event hub to
prepare a Python object as being closed, pre-empting further
close operations from accidentally shutting down the wrong OS thread.
"""
listener = self.lclass(evtype, fileno, cb, tb, mark_as_closed)
bucket = self.listeners[evtype]
if fileno in bucket:
if g_prevent_multiple_readers:
raise RuntimeError(
"Second simultaneous %s on fileno %s "
"detected. Unless you really know what you're doing, "
"make sure that only one greenthread can %s any "
"particular socket. Consider using a pools.Pool. "
"If you do know what you're doing and want to disable "
"this error, call "
"eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; "
"THAT THREAD=%s" % (
evtype, fileno, evtype, cb, bucket[fileno]))
# store off the second listener in another structure
self.secondaries[evtype].setdefault(fileno, []).append(listener)
else:
bucket[fileno] = listener
return listener
def _obsolete(self, fileno):
""" We've received an indication that 'fileno' has been obsoleted.
Any current listeners must be defanged, and notifications to
their greenlets queued up to send.
"""
found = False
for evtype, bucket in self.secondaries.items():
if fileno in bucket:
for listener in bucket[fileno]:
found = True
self.closed.append(listener)
listener.defang()
del bucket[fileno]
# For the primary listeners, we actually need to call remove,
# which may modify the underlying OS polling objects.
for evtype, bucket in self.listeners.items():
if fileno in bucket:
listener = bucket[fileno]
found = True
self.closed.append(listener)
self.remove(listener)
listener.defang()
return found
def notify_close(self, fileno):
""" We might want to do something when a fileno is closed.
However, currently it suffices to obsolete listeners only
when we detect an old fileno being recycled, on open.
"""
pass
def remove(self, listener):
if listener.spent:
# trampoline may trigger this in its finally section.
return
fileno = listener.fileno
evtype = listener.evtype
if listener is self.listeners[evtype][fileno]:
del self.listeners[evtype][fileno]
# migrate a secondary listener to be the primary listener
if fileno in self.secondaries[evtype]:
sec = self.secondaries[evtype][fileno]
if sec:
self.listeners[evtype][fileno] = sec.pop(0)
if not sec:
del self.secondaries[evtype][fileno]
else:
self.secondaries[evtype][fileno].remove(listener)
if not self.secondaries[evtype][fileno]:
del self.secondaries[evtype][fileno]
def mark_as_reopened(self, fileno):
""" If a file descriptor is returned by the OS as the result of some
open call (or equivalent), that signals that it might be being
recycled.
Catch the case where the fd was previously in use.
"""
self._obsolete(fileno)
def remove_descriptor(self, fileno):
""" Completely remove all listeners for this fileno. For internal use
only."""
# gather any listeners we have
listeners = []
listeners.append(self.listeners[READ].get(fileno, noop))
listeners.append(self.listeners[WRITE].get(fileno, noop))
listeners.extend(self.secondaries[READ].get(fileno, ()))
listeners.extend(self.secondaries[WRITE].get(fileno, ()))
for listener in listeners:
try:
# listener.cb may want to remove(listener)
listener.cb(fileno)
except Exception:
self.squelch_generic_exception(sys.exc_info())
# Now this fileno is dead to all listeners.
self.listeners[READ].pop(fileno, None)
self.listeners[WRITE].pop(fileno, None)
self.secondaries[READ].pop(fileno, None)
self.secondaries[WRITE].pop(fileno, None)
def close_one(self):
""" Triggered from the main run loop. If a listener's underlying FD was
closed somehow, throw an exception back to the trampoline, which should
be able to manage it appropriately.
"""
listener = self.closed.pop()
if not listener.greenlet.dead:
# There's no point signalling a greenlet that's already dead.
listener.tb(eventlet.hubs.IOClosed(errno.ENOTCONN, "Operation on closed file"))
def ensure_greenlet(self):
if self.greenlet.dead:
# create new greenlet sharing same parent as original
new = greenlet.greenlet(self.run, self.greenlet.parent)
# need to assign as parent of old greenlet
# for those greenlets that are currently
# children of the dead hub and may subsequently
# exit without further switching to hub.
self.greenlet.parent = new
self.greenlet = new
def switch(self):
cur = greenlet.getcurrent()
assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP'
switch_out = getattr(cur, 'switch_out', None)
if switch_out is not None:
try:
switch_out()
except:
self.squelch_generic_exception(sys.exc_info())
self.ensure_greenlet()
try:
if self.greenlet.parent is not cur:
cur.parent = self.greenlet
except ValueError:
pass # gets raised if there is a greenlet parent cycle
return self.greenlet.switch()
def squelch_exception(self, fileno, exc_info):
traceback.print_exception(*exc_info)
sys.stderr.write("Removing descriptor: %r\n" % (fileno,))
sys.stderr.flush()
try:
self.remove_descriptor(fileno)
except Exception as e:
sys.stderr.write("Exception while removing descriptor! %r\n" % (e,))
sys.stderr.flush()
def wait(self, seconds=None):
raise NotImplementedError("Implement this in a subclass")
def default_sleep(self):
return 60.0
def sleep_until(self):
t = self.timers
if not t:
return None
return t[0][0]
def run(self, *a, **kw):
"""Run the runloop until abort is called.
"""
# accept and discard variable arguments because they will be
# supplied if other greenlets have run and exited before the
# hub's greenlet gets a chance to run
if self.running:
raise RuntimeError("Already running!")
try:
self.running = True
self.stopping = False
while not self.stopping:
while self.closed:
# We ditch all of these first.
self.close_one()
self.prepare_timers()
if self.debug_blocking:
self.block_detect_pre()
self.fire_timers(self.clock())
if self.debug_blocking:
self.block_detect_post()
self.prepare_timers()
wakeup_when = self.sleep_until()
if wakeup_when is None:
sleep_time = self.default_sleep()
else:
sleep_time = wakeup_when - self.clock()
if sleep_time > 0:
self.wait(sleep_time)
else:
self.wait(0)
else:
self.timers_canceled = 0
del self.timers[:]
del self.next_timers[:]
finally:
self.running = False
self.stopping = False
def abort(self, wait=False):
"""Stop the runloop. If run is executing, it will exit after
completing the next runloop iteration.
Set *wait* to True to cause abort to switch to the hub immediately and
wait until it's finished processing. Waiting for the hub will only
work from the main greenthread; all other greenthreads will become
unreachable.
"""
if self.running:
self.stopping = True
if wait:
assert self.greenlet is not greenlet.getcurrent(
), "Can't abort with wait from inside the hub's greenlet."
# schedule an immediate timer just so the hub doesn't sleep
self.schedule_call_global(0, lambda: None)
# switch to it; when done the hub will switch back to its parent,
# the main greenlet
self.switch()
def squelch_generic_exception(self, exc_info):
if self.debug_exceptions:
traceback.print_exception(*exc_info)
sys.stderr.flush()
def squelch_timer_exception(self, timer, exc_info):
if self.debug_exceptions:
traceback.print_exception(*exc_info)
sys.stderr.flush()
def add_timer(self, timer):
scheduled_time = self.clock() + timer.seconds
self.next_timers.append((scheduled_time, timer))
return scheduled_time
def timer_canceled(self, timer):
self.timers_canceled += 1
len_timers = len(self.timers) + len(self.next_timers)
if len_timers > 1000 and len_timers / 2 <= self.timers_canceled:
self.timers_canceled = 0
self.timers = [t for t in self.timers if not t[1].called]
self.next_timers = [t for t in self.next_timers if not t[1].called]
heapq.heapify(self.timers)
def prepare_timers(self):
heappush = heapq.heappush
t = self.timers
for item in self.next_timers:
if item[1].called:
self.timers_canceled -= 1
else:
heappush(t, item)
del self.next_timers[:]
def schedule_call_local(self, seconds, cb, *args, **kw):
"""Schedule a callable to be called after 'seconds' seconds have
elapsed. Cancel the timer if greenlet has exited.
seconds: The number of seconds to wait.
cb: The callable to call after the given time.
*args: Arguments to pass to the callable when called.
**kw: Keyword arguments to pass to the callable when called.
"""
t = timer.LocalTimer(seconds, cb, *args, **kw)
self.add_timer(t)
return t
def schedule_call_global(self, seconds, cb, *args, **kw):
"""Schedule a callable to be called after 'seconds' seconds have
elapsed. The timer will NOT be canceled if the current greenlet has
exited before the timer fires.
seconds: The number of seconds to wait.
cb: The callable to call after the given time.
*args: Arguments to pass to the callable when called.
**kw: Keyword arguments to pass to the callable when called.
"""
t = timer.Timer(seconds, cb, *args, **kw)
self.add_timer(t)
return t
def fire_timers(self, when):
t = self.timers
heappop = heapq.heappop
while t:
next = t[0]
exp = next[0]
timer = next[1]
if when < exp:
break
heappop(t)
try:
if timer.called:
self.timers_canceled -= 1
else:
timer()
except self.SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_timer_exception(timer, sys.exc_info())
# for debugging:
def get_readers(self):
return self.listeners[READ].values()
def get_writers(self):
return self.listeners[WRITE].values()
def get_timers_count(hub):
return len(hub.timers) + len(hub.next_timers)
def set_debug_listeners(self, value):
if value:
self.lclass = DebugListener
else:
self.lclass = FdListener
def set_timer_exceptions(self, value):
self.debug_exceptions = value
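# --- Illustrative usage sketch (editor's addition, not part of eventlet) ---
# Besides file descriptors, the hub drives timers: schedule_call_global fires
# even if the scheduling greenthread has moved on, while schedule_call_local
# (above) is cancelled together with its greenlet.
if __name__ == "__main__":
    import eventlet
    from eventlet import hubs

    hubs.get_hub().schedule_call_global(0.05, print, "global timer fired")
    eventlet.sleep(0.1)                # yield so the hub can run the timer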


@@ -0,0 +1,110 @@
import os
import sys
from eventlet import patcher, support
from eventlet.hubs import hub
select = patcher.original('select')
time = patcher.original('time')
def is_available():
return hasattr(select, 'kqueue')
class Hub(hub.BaseHub):
MAX_EVENTS = 100
def __init__(self, clock=None):
self.FILTERS = {
hub.READ: select.KQ_FILTER_READ,
hub.WRITE: select.KQ_FILTER_WRITE,
}
super().__init__(clock)
self._events = {}
self._init_kqueue()
def _init_kqueue(self):
self.kqueue = select.kqueue()
self._pid = os.getpid()
def _reinit_kqueue(self):
self.kqueue.close()
self._init_kqueue()
events = [e for i in self._events.values()
for e in i.values()]
self.kqueue.control(events, 0, 0)
def _control(self, events, max_events, timeout):
try:
return self.kqueue.control(events, max_events, timeout)
except OSError:
# have we forked?
if os.getpid() != self._pid:
self._reinit_kqueue()
return self.kqueue.control(events, max_events, timeout)
raise
def add(self, evtype, fileno, cb, tb, mac):
listener = super().add(evtype, fileno, cb, tb, mac)
events = self._events.setdefault(fileno, {})
if evtype not in events:
try:
event = select.kevent(fileno, self.FILTERS.get(evtype), select.KQ_EV_ADD)
self._control([event], 0, 0)
events[evtype] = event
except ValueError:
super().remove(listener)
raise
return listener
def _delete_events(self, events):
del_events = [
select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
for e in events
]
self._control(del_events, 0, 0)
def remove(self, listener):
super().remove(listener)
evtype = listener.evtype
fileno = listener.fileno
if not self.listeners[evtype].get(fileno):
event = self._events[fileno].pop(evtype, None)
if event is None:
return
try:
self._delete_events((event,))
except OSError:
pass
def remove_descriptor(self, fileno):
super().remove_descriptor(fileno)
try:
events = self._events.pop(fileno).values()
self._delete_events(events)
except KeyError:
pass
except OSError:
pass
def wait(self, seconds=None):
readers = self.listeners[self.READ]
writers = self.listeners[self.WRITE]
if not readers and not writers:
if seconds:
time.sleep(seconds)
return
result = self._control([], self.MAX_EVENTS, seconds)
SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
for event in result:
fileno = event.ident
evfilt = event.filter
try:
if evfilt == select.KQ_FILTER_READ:
readers.get(fileno, hub.noop).cb(fileno)
if evfilt == select.KQ_FILTER_WRITE:
writers.get(fileno, hub.noop).cb(fileno)
except SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())


@@ -0,0 +1,118 @@
import errno
import sys
from eventlet import patcher, support
from eventlet.hubs import hub
select = patcher.original('select')
time = patcher.original('time')
def is_available():
return hasattr(select, 'poll')
class Hub(hub.BaseHub):
def __init__(self, clock=None):
super().__init__(clock)
self.EXC_MASK = select.POLLERR | select.POLLHUP
self.READ_MASK = select.POLLIN | select.POLLPRI
self.WRITE_MASK = select.POLLOUT
self.poll = select.poll()
def add(self, evtype, fileno, cb, tb, mac):
listener = super().add(evtype, fileno, cb, tb, mac)
self.register(fileno, new=True)
return listener
def remove(self, listener):
super().remove(listener)
self.register(listener.fileno)
def register(self, fileno, new=False):
mask = 0
if self.listeners[self.READ].get(fileno):
mask |= self.READ_MASK | self.EXC_MASK
if self.listeners[self.WRITE].get(fileno):
mask |= self.WRITE_MASK | self.EXC_MASK
try:
if mask:
if new:
self.poll.register(fileno, mask)
else:
try:
self.poll.modify(fileno, mask)
except OSError:
self.poll.register(fileno, mask)
else:
try:
self.poll.unregister(fileno)
except (KeyError, OSError):
# raised if we try to remove a fileno that was
# already removed/invalid
pass
except ValueError:
# fileno is bad, issue 74
self.remove_descriptor(fileno)
raise
def remove_descriptor(self, fileno):
super().remove_descriptor(fileno)
try:
self.poll.unregister(fileno)
except (KeyError, ValueError, OSError):
# raised if we try to remove a fileno that was
# already removed/invalid
pass
def do_poll(self, seconds):
# poll.poll expects integral milliseconds
return self.poll.poll(int(seconds * 1000.0))
def wait(self, seconds=None):
readers = self.listeners[self.READ]
writers = self.listeners[self.WRITE]
if not readers and not writers:
if seconds:
time.sleep(seconds)
return
try:
presult = self.do_poll(seconds)
except OSError as e:
if support.get_errno(e) == errno.EINTR:
return
raise
SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
if self.debug_blocking:
self.block_detect_pre()
# Accumulate the listeners to call back to prior to
# triggering any of them. This is to keep the set
# of callbacks in sync with the events we've just
# polled for. It prevents one handler from invalidating
# another.
callbacks = set()
noop = hub.noop # shave getattr
for fileno, event in presult:
if event & self.READ_MASK:
callbacks.add((readers.get(fileno, noop), fileno))
if event & self.WRITE_MASK:
callbacks.add((writers.get(fileno, noop), fileno))
if event & select.POLLNVAL:
self.remove_descriptor(fileno)
continue
if event & self.EXC_MASK:
callbacks.add((readers.get(fileno, noop), fileno))
callbacks.add((writers.get(fileno, noop), fileno))
for listener, fileno in callbacks:
try:
listener.cb(fileno)
except SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())
if self.debug_blocking:
self.block_detect_post()
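
A hedged, self-contained illustration of the timeout handling in do_poll() above (plain stdlib, not eventlet code): poll.poll() takes an integral millisecond timeout, hence the int(seconds * 1000.0) conversion, which silently truncates fractional milliseconds.
import os
import select
r, w = os.pipe()
poller = select.poll()
poller.register(r, select.POLLIN | select.POLLPRI)
seconds = 0.25
# Nothing written yet, so this waits ~250 ms and returns no events.
assert poller.poll(int(seconds * 1000.0)) == []
os.write(w, b"x")
# Data is ready: poll returns immediately with a POLLIN event for r.
events = poller.poll(int(seconds * 1000.0))
assert events and events[0][0] == r and events[0][1] & select.POLLIN
os.close(r)
os.close(w)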

View File

@@ -0,0 +1,4 @@
raise ImportError(
"Eventlet pyevent hub was removed because it was not maintained."
" Try version 0.22.1 or older. Sorry for the inconvenience."
)

View File

@@ -0,0 +1,63 @@
import errno
import sys
from eventlet import patcher, support
from eventlet.hubs import hub
select = patcher.original('select')
time = patcher.original('time')
try:
BAD_SOCK = {errno.EBADF, errno.WSAENOTSOCK}
except AttributeError:
BAD_SOCK = {errno.EBADF}
def is_available():
return hasattr(select, 'select')
class Hub(hub.BaseHub):
def _remove_bad_fds(self):
""" Iterate through fds, removing the ones that are bad per the
operating system.
"""
all_fds = list(self.listeners[self.READ]) + list(self.listeners[self.WRITE])
for fd in all_fds:
try:
select.select([fd], [], [], 0)
except OSError as e:
if support.get_errno(e) in BAD_SOCK:
self.remove_descriptor(fd)
def wait(self, seconds=None):
readers = self.listeners[self.READ]
writers = self.listeners[self.WRITE]
if not readers and not writers:
if seconds:
time.sleep(seconds)
return
reader_fds = list(readers)
writer_fds = list(writers)
all_fds = reader_fds + writer_fds
try:
r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds)
except OSError as e:
if support.get_errno(e) == errno.EINTR:
return
elif support.get_errno(e) in BAD_SOCK:
self._remove_bad_fds()
return
else:
raise
for fileno in er:
readers.get(fileno, hub.noop).cb(fileno)
writers.get(fileno, hub.noop).cb(fileno)
for listeners, events in ((readers, r), (writers, w)):
for fileno in events:
try:
listeners.get(fileno, hub.noop).cb(fileno)
except self.SYSTEM_EXCEPTIONS:
raise
except:
self.squelch_exception(fileno, sys.exc_info())
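
The probe used by _remove_bad_fds() above, shown in isolation (a minimal sketch, not eventlet code): select() on a single descriptor with a zero timeout returns immediately for a valid fd and raises EBADF for a closed one.
import errno
import os
import select
r, w = os.pipe()
select.select([r], [], [], 0)       # valid descriptor: returns at once
os.close(r)
try:
    select.select([r], [], [], 0)   # closed descriptor: the OS rejects it
except OSError as e:
    assert e.errno == errno.EBADF
finally:
    os.close(w)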

View File

@@ -0,0 +1,106 @@
import traceback
import eventlet.hubs
from eventlet.support import greenlets as greenlet
import io
""" If true, captures a stack trace for each timer when constructed. This is
useful for debugging leaking timers, to find out where the timer was set up. """
_g_debug = False
class Timer:
def __init__(self, seconds, cb, *args, **kw):
"""Create a timer.
            seconds: The minimum number of seconds to wait before calling cb
cb: The callback to call when the timer has expired
*args: The arguments to pass to cb
**kw: The keyword arguments to pass to cb
This timer will not be run unless it is scheduled in a runloop by
calling timer.schedule() or runloop.add_timer(timer).
"""
self.seconds = seconds
self.tpl = cb, args, kw
self.called = False
if _g_debug:
self.traceback = io.StringIO()
traceback.print_stack(file=self.traceback)
@property
def pending(self):
return not self.called
def __repr__(self):
secs = getattr(self, 'seconds', None)
cb, args, kw = getattr(self, 'tpl', (None, None, None))
retval = "Timer(%s, %s, *%s, **%s)" % (
secs, cb, args, kw)
if _g_debug and hasattr(self, 'traceback'):
retval += '\n' + self.traceback.getvalue()
return retval
def copy(self):
cb, args, kw = self.tpl
return self.__class__(self.seconds, cb, *args, **kw)
def schedule(self):
"""Schedule this timer to run in the current runloop.
"""
self.called = False
self.scheduled_time = eventlet.hubs.get_hub().add_timer(self)
return self
def __call__(self, *args):
if not self.called:
self.called = True
cb, args, kw = self.tpl
try:
cb(*args, **kw)
finally:
try:
del self.tpl
except AttributeError:
pass
def cancel(self):
"""Prevent this timer from being called. If the timer has already
been called or canceled, has no effect.
"""
if not self.called:
self.called = True
eventlet.hubs.get_hub().timer_canceled(self)
try:
del self.tpl
except AttributeError:
pass
# No default ordering in 3.x. heapq uses <
# FIXME should full set be added?
def __lt__(self, other):
return id(self) < id(other)
class LocalTimer(Timer):
def __init__(self, *args, **kwargs):
self.greenlet = greenlet.getcurrent()
Timer.__init__(self, *args, **kwargs)
@property
def pending(self):
if self.greenlet is None or self.greenlet.dead:
return False
return not self.called
def __call__(self, *args):
if not self.called:
self.called = True
if self.greenlet is not None and self.greenlet.dead:
return
cb, args, kw = self.tpl
cb(*args, **kw)
def cancel(self):
self.greenlet = None
Timer.cancel(self)
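
A hedged usage sketch for the Timer class above, assuming this module is importable as eventlet.hubs.timer: schedule() places the timer on the current hub, and yielding to the hub with eventlet.sleep() lets it fire.
import eventlet
from eventlet.hubs.timer import Timer
fired = []
t = Timer(0.05, fired.append, "ping")
t.schedule()            # register with the current hub's run loop
eventlet.sleep(0.1)     # yield to the hub so the timer can expire
assert fired == ["ping"]
assert not t.pending    # the timer has been called and will not repeat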

Some files were not shown because too many files have changed in this diff.