Added delete option to database storage.

Batuhan Berk Başoğlu 2020-10-12 12:10:01 -04:00
parent 308604a33c
commit 963b5bc68b
1868 changed files with 192402 additions and 13278 deletions

View file

@@ -0,0 +1,22 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google API Core.
This package contains common code and utilities used by Google client libraries.
"""
from google.api_core import version as api_core_version
__version__ = api_core_version.__version__

View file

@@ -0,0 +1,735 @@
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bi-directional streaming RPC helpers."""
import collections
import datetime
import logging
import threading
import time
from six.moves import queue
from google.api_core import exceptions
_LOGGER = logging.getLogger(__name__)
_BIDIRECTIONAL_CONSUMER_NAME = "Thread-ConsumeBidirectionalStream"
class _RequestQueueGenerator(object):
"""A helper for sending requests to a gRPC stream from a Queue.
This generator takes requests off a given queue and yields them to gRPC.
This helper is useful when you have an indeterminate, indefinite, or
otherwise open-ended set of requests to send through a request-streaming
(or bidirectional) RPC.
This is necessary because gRPC takes an iterator as the
request for request-streaming RPCs. gRPC consumes this iterator in another
thread to allow it to block while generating requests for the stream.
However, if the generator blocks indefinitely gRPC will not be able to
clean up the thread as it'll be blocked on `next(iterator)` and not be able
to check the channel status to stop iterating. This helper mitigates that
by waiting on the queue with a timeout and checking the RPC state before
yielding.
Finally, it allows for retrying without swapping queues because if it does
pull an item off the queue when the RPC is inactive, it'll immediately put
it back and then exit. This is necessary because yielding the item in this
case will cause gRPC to discard it. In practice, this means that the order
of messages is not guaranteed. If such a thing is necessary it would be
easy to use a priority queue.
Example::
requests = request_queue_generator(q)
call = stub.StreamingRequest(iter(requests))
requests.call = call
for response in call:
print(response)
q.put(...)
Note that it is possible to accomplish this behavior without "spinning"
(using a queue timeout). One possible way would be to use more threads to
multiplex the grpc end event with the queue, another possible way is to
use selectors and a custom event/queue object. Both of these approaches
are significant engineering efforts for small benefit - the CPU
consumed by spinning is pretty minuscule.
Args:
queue (queue.Queue): The request queue.
period (float): The number of seconds to wait for items from the queue
before checking if the RPC is cancelled. In practice, this
determines the maximum amount of time the request consumption
thread will live after the RPC is cancelled.
initial_request (Union[protobuf.Message,
Callable[None, protobuf.Message]]): The initial request to
yield. This is done independently of the request queue to allow for
easily restarting streams that require some initial configuration
request.
"""
def __init__(self, queue, period=1, initial_request=None):
self._queue = queue
self._period = period
self._initial_request = initial_request
self.call = None
def _is_active(self):
# Note: there is a possibility that this starts *before* the call
# property is set. So we have to check if self.call is set before
# seeing if it's active.
if self.call is not None and not self.call.is_active():
return False
else:
return True
def __iter__(self):
if self._initial_request is not None:
if callable(self._initial_request):
yield self._initial_request()
else:
yield self._initial_request
while True:
try:
item = self._queue.get(timeout=self._period)
except queue.Empty:
if not self._is_active():
_LOGGER.debug(
"Empty queue and inactive call, exiting request " "generator."
)
return
else:
# call is still active, keep waiting for queue items.
continue
# The consumer explicitly sent "None", indicating that the request
# should end.
if item is None:
_LOGGER.debug("Cleanly exiting request generator.")
return
if not self._is_active():
# We have an item, but the call is closed. We should put the
# item back on the queue so that the next call can consume it.
self._queue.put(item)
_LOGGER.debug(
"Inactive call, replacing item on queue and exiting "
"request generator."
)
return
yield item
class _Throttle(object):
"""A context manager limiting the total entries in a sliding time window.
If more than ``access_limit`` attempts are made to enter the context manager
instance in the last ``time window`` interval, the exceeding requests block
until enough time elapses.
The context manager instances are thread-safe and can be shared between
multiple threads. If multiple requests are blocked and waiting to enter,
the exact order in which they are allowed to proceed is not determined.
Example::
max_three_per_second = _Throttle(
access_limit=3, time_window=datetime.timedelta(seconds=1)
)
for i in range(5):
with max_three_per_second as time_waited:
print("{}: Waited {} seconds to enter".format(i, time_waited))
Args:
access_limit (int): the maximum number of entries allowed in the time window
time_window (datetime.timedelta): the width of the sliding time window
"""
def __init__(self, access_limit, time_window):
if access_limit < 1:
raise ValueError("access_limit argument must be positive")
if time_window <= datetime.timedelta(0):
raise ValueError("time_window argument must be a positive timedelta")
self._time_window = time_window
self._access_limit = access_limit
self._past_entries = collections.deque(
maxlen=access_limit
) # least recent first
self._entry_lock = threading.Lock()
def __enter__(self):
with self._entry_lock:
cutoff_time = datetime.datetime.now() - self._time_window
# drop the entries that are too old, as they are no longer relevant
while self._past_entries and self._past_entries[0] < cutoff_time:
self._past_entries.popleft()
if len(self._past_entries) < self._access_limit:
self._past_entries.append(datetime.datetime.now())
return 0.0 # no waiting was needed
to_wait = (self._past_entries[0] - cutoff_time).total_seconds()
time.sleep(to_wait)
self._past_entries.append(datetime.datetime.now())
return to_wait
def __exit__(self, *_):
pass
def __repr__(self):
return "{}(access_limit={}, time_window={})".format(
self.__class__.__name__, self._access_limit, repr(self._time_window)
)
class BidiRpc(object):
"""A helper for consuming a bi-directional streaming RPC.
This maps gRPC's built-in interface which uses a request iterator and a
response iterator into a socket-like :func:`send` and :func:`recv`. This
is a more useful pattern for long-running or asymmetric streams (streams
where there is not a direct correlation between the requests and
responses).
Example::
initial_request = example_pb2.StreamingRpcRequest(
setting='example')
rpc = BidiRpc(
stub.StreamingRpc,
initial_request=initial_request,
metadata=[('name', 'value')]
)
rpc.open()
while rpc.is_active():
print(rpc.recv())
rpc.send(example_pb2.StreamingRpcRequest(
data='example'))
This does *not* retry the stream on errors. See :class:`ResumableBidiRpc`.
Args:
start_rpc (grpc.StreamStreamMultiCallable): The gRPC method used to
start the RPC.
initial_request (Union[protobuf.Message,
Callable[None, protobuf.Message]]): The initial request to
yield. This is useful if an initial request is needed to start the
stream.
metadata (Sequence[Tuple(str, str)]): RPC metadata to include in
the request.
"""
def __init__(self, start_rpc, initial_request=None, metadata=None):
self._start_rpc = start_rpc
self._initial_request = initial_request
self._rpc_metadata = metadata
self._request_queue = queue.Queue()
self._request_generator = None
self._is_active = False
self._callbacks = []
self.call = None
def add_done_callback(self, callback):
"""Adds a callback that will be called when the RPC terminates.
This occurs when the RPC errors or is successfully terminated.
Args:
callback (Callable[[grpc.Future], None]): The callback to execute.
It will be provided with the same gRPC future as the underlying
stream which will also be a :class:`grpc.Call`.
"""
self._callbacks.append(callback)
def _on_call_done(self, future):
for callback in self._callbacks:
callback(future)
def open(self):
"""Opens the stream."""
if self.is_active:
raise ValueError("Can not open an already open stream.")
request_generator = _RequestQueueGenerator(
self._request_queue, initial_request=self._initial_request
)
call = self._start_rpc(iter(request_generator), metadata=self._rpc_metadata)
request_generator.call = call
# TODO: api_core should expose the future interface for wrapped
# callables as well.
if hasattr(call, "_wrapped"): # pragma: NO COVER
call._wrapped.add_done_callback(self._on_call_done)
else:
call.add_done_callback(self._on_call_done)
self._request_generator = request_generator
self.call = call
def close(self):
"""Closes the stream."""
if self.call is None:
return
self._request_queue.put(None)
self.call.cancel()
self._request_generator = None
# Don't set self.call to None. Keep it around so that send/recv can
# raise the error.
def send(self, request):
"""Queue a message to be sent on the stream.
Send is non-blocking.
If the underlying RPC has been closed, this will raise.
Args:
request (protobuf.Message): The request to send.
"""
if self.call is None:
raise ValueError("Can not send() on an RPC that has never been open()ed.")
# Don't use self.is_active(), as ResumableBidiRpc will overload it
# to mean something semantically different.
if self.call.is_active():
self._request_queue.put(request)
else:
# calling next should cause the call to raise.
next(self.call)
def recv(self):
"""Wait for a message to be returned from the stream.
Recv is blocking.
If the underlying RPC has been closed, this will raise.
Returns:
protobuf.Message: The received message.
"""
if self.call is None:
raise ValueError("Can not recv() on an RPC that has never been open()ed.")
return next(self.call)
@property
def is_active(self):
"""bool: True if this stream is currently open and active."""
return self.call is not None and self.call.is_active()
@property
def pending_requests(self):
"""int: Returns an estimate of the number of queued requests."""
return self._request_queue.qsize()
def _never_terminate(future_or_error):
"""By default, no errors cause BiDi termination."""
return False
class ResumableBidiRpc(BidiRpc):
"""A :class:`BidiRpc` that can automatically resume the stream on errors.
It uses the ``should_recover`` arg to determine if it should re-establish
the stream on error.
Example::
def should_recover(exc):
return (
isinstance(exc, grpc.RpcError) and
exc.code() == grpc.StatusCode.UNAVAILABLE)
initial_request = example_pb2.StreamingRpcRequest(
setting='example')
metadata = [('header_name', 'value')]
rpc = ResumableBidiRpc(
stub.StreamingRpc,
should_recover=should_recover,
initial_request=initial_request,
metadata=metadata
)
rpc.open()
while rpc.is_active():
print(rpc.recv())
rpc.send(example_pb2.StreamingRpcRequest(
data='example'))
Args:
start_rpc (grpc.StreamStreamMultiCallable): The gRPC method used to
start the RPC.
initial_request (Union[protobuf.Message,
Callable[None, protobuf.Message]]): The initial request to
yield. This is useful if an initial request is needed to start the
stream.
should_recover (Callable[[Exception], bool]): A function that returns
True if the stream should be recovered. This will be called
whenever an error is encountered on the stream.
should_terminate (Callable[[Exception], bool]): A function that returns
True if the stream should be terminated. This will be called
whenever an error is encountered on the stream.
metadata (Sequence[Tuple(str, str)]): RPC metadata to include in
the request.
throttle_reopen (bool): If ``True``, throttling will be applied to
stream reopen calls. Defaults to ``False``.
"""
def __init__(
self,
start_rpc,
should_recover,
should_terminate=_never_terminate,
initial_request=None,
metadata=None,
throttle_reopen=False,
):
super(ResumableBidiRpc, self).__init__(start_rpc, initial_request, metadata)
self._should_recover = should_recover
self._should_terminate = should_terminate
self._operational_lock = threading.RLock()
self._finalized = False
self._finalize_lock = threading.Lock()
if throttle_reopen:
self._reopen_throttle = _Throttle(
access_limit=5, time_window=datetime.timedelta(seconds=10)
)
else:
self._reopen_throttle = None
def _finalize(self, result):
with self._finalize_lock:
if self._finalized:
return
for callback in self._callbacks:
callback(result)
self._finalized = True
def _on_call_done(self, future):
# Unlike the base class, we only execute the callbacks on a terminal
# error, not for errors that we can recover from. Note that grpc's
# "future" here is also a grpc.RpcError.
with self._operational_lock:
if self._should_terminate(future):
self._finalize(future)
elif not self._should_recover(future):
self._finalize(future)
else:
_LOGGER.debug("Re-opening stream from gRPC callback.")
self._reopen()
def _reopen(self):
with self._operational_lock:
# Another thread already managed to re-open this stream.
if self.call is not None and self.call.is_active():
_LOGGER.debug("Stream was already re-established.")
return
self.call = None
# Request generator should exit cleanly since the RPC it's bound to
# has exited.
self._request_generator = None
# Note: we do not currently do any sort of backoff here. The
# assumption is that re-establishing the stream under normal
# circumstances will happen in intervals greater than 60s.
# However, it is possible in a degenerate case that the server
# closes the stream rapidly which would lead to thrashing here,
# but hopefully in those cases the server would return a non-
# retryable error.
try:
if self._reopen_throttle:
with self._reopen_throttle:
self.open()
else:
self.open()
# If re-opening or re-calling the method fails for any reason,
# consider it a terminal error and finalize the stream.
except Exception as exc:
_LOGGER.debug("Failed to re-open stream due to %s", exc)
self._finalize(exc)
raise
_LOGGER.info("Re-established stream")
def _recoverable(self, method, *args, **kwargs):
"""Wraps a method to recover the stream and retry on error.
If a retryable error occurs while making the call, then the stream will
be re-opened and the method will be retried. This happens indefinitely
so long as the error is a retryable one. If an error occurs while
re-opening the stream, then this method will raise immediately and
trigger finalization of this object.
Args:
method (Callable[..., Any]): The method to call.
args: The args to pass to the method.
kwargs: The kwargs to pass to the method.
"""
while True:
try:
return method(*args, **kwargs)
except Exception as exc:
with self._operational_lock:
_LOGGER.debug("Call to retryable %r caused %s.", method, exc)
if self._should_terminate(exc):
self.close()
_LOGGER.debug("Terminating %r due to %s.", method, exc)
self._finalize(exc)
break
if not self._should_recover(exc):
self.close()
_LOGGER.debug("Not retrying %r due to %s.", method, exc)
self._finalize(exc)
raise exc
_LOGGER.debug("Re-opening stream from retryable %r.", method)
self._reopen()
def _send(self, request):
# Grab a reference to the RPC call. Because another thread (notably
# the gRPC error thread) can modify self.call (by invoking reopen),
# we should ensure our reference can not change underneath us.
# If self.call is modified (such as replaced with a new RPC call) then
# this will use the "old" RPC, which should result in the same
# exception passed into gRPC's error handler being raised here, which
# will be handled by the usual error handling in retryable.
with self._operational_lock:
call = self.call
if call is None:
raise ValueError("Can not send() on an RPC that has never been open()ed.")
# Don't use self.is_active(), as ResumableBidiRpc will overload it
# to mean something semantically different.
if call.is_active():
self._request_queue.put(request)
else:
# calling next should cause the call to raise.
next(call)
def send(self, request):
return self._recoverable(self._send, request)
def _recv(self):
with self._operational_lock:
call = self.call
if call is None:
raise ValueError("Can not recv() on an RPC that has never been open()ed.")
return next(call)
def recv(self):
return self._recoverable(self._recv)
def close(self):
self._finalize(None)
super(ResumableBidiRpc, self).close()
@property
def is_active(self):
"""bool: True if this stream is currently open and active."""
# Use the operational lock. It's entirely possible for something
# to check the active state *while* the RPC is being retried.
# Also, use finalized to track the actual terminal state here.
# This is because if the stream is re-established by the gRPC thread
# it's technically possible to check this between when gRPC marks the
# RPC as inactive and when gRPC executes our callback that re-opens
# the stream.
with self._operational_lock:
return self.call is not None and not self._finalized
class BackgroundConsumer(object):
"""A bi-directional stream consumer that runs in a separate thread.
This maps the consumption of a stream into a callback-based model. It also
provides :func:`pause` and :func:`resume` to allow for flow-control.
Example::
def should_recover(exc):
return (
isinstance(exc, grpc.RpcError) and
exc.code() == grpc.StatusCode.UNAVAILABLE)
initial_request = example_pb2.StreamingRpcRequest(
setting='example')
rpc = ResumableBidiRpc(
stub.StreamingRpc,
initial_request=initial_request,
should_recover=should_recover)
def on_response(response):
print(response)
consumer = BackgroundConsumer(rpc, on_response)
consumer.start()
Note that error handling *must* be done by using the provided
``bidi_rpc``'s ``add_done_callback``. This helper will automatically exit
whenever the RPC itself exits and will not provide any error details.
Args:
bidi_rpc (BidiRpc): The RPC to consume. Should not have been
``open()``ed yet.
on_response (Callable[[protobuf.Message], None]): The callback to
be called for every response on the stream.
"""
def __init__(self, bidi_rpc, on_response):
self._bidi_rpc = bidi_rpc
self._on_response = on_response
self._paused = False
self._wake = threading.Condition()
self._thread = None
self._operational_lock = threading.Lock()
def _on_call_done(self, future):
# Resume the thread if it's paused; this prevents blocking forever
# when the RPC has terminated.
self.resume()
def _thread_main(self, ready):
try:
ready.set()
self._bidi_rpc.add_done_callback(self._on_call_done)
self._bidi_rpc.open()
while self._bidi_rpc.is_active:
# Do not allow the paused status to change at all during this
# section. There is a condition where we could be resumed
# between checking if we are paused and calling wake.wait(),
# which means that we will miss the notification to wake up
# (oops!) and wait for a notification that will never come.
# Keeping the lock throughout avoids that.
# In the future, we could use `Condition.wait_for` if we drop
# Python 2.7.
with self._wake:
while self._paused:
_LOGGER.debug("paused, waiting for waking.")
self._wake.wait()
_LOGGER.debug("woken.")
_LOGGER.debug("waiting for recv.")
response = self._bidi_rpc.recv()
_LOGGER.debug("recved response.")
self._on_response(response)
except exceptions.GoogleAPICallError as exc:
_LOGGER.debug(
"%s caught error %s and will exit. Generally this is due to "
"the RPC itself being cancelled and the error will be "
"surfaced to the calling code.",
_BIDIRECTIONAL_CONSUMER_NAME,
exc,
exc_info=True,
)
except Exception as exc:
_LOGGER.exception(
"%s caught unexpected exception %s and will exit.",
_BIDIRECTIONAL_CONSUMER_NAME,
exc,
)
_LOGGER.info("%s exiting", _BIDIRECTIONAL_CONSUMER_NAME)
def start(self):
"""Start the background thread and begin consuming the thread."""
with self._operational_lock:
ready = threading.Event()
thread = threading.Thread(
name=_BIDIRECTIONAL_CONSUMER_NAME,
target=self._thread_main,
args=(ready,),
)
thread.daemon = True
thread.start()
# Other parts of the code rely on `thread.is_alive` which
# isn't sufficient to know if a thread is active, just that it may
# soon be active. This can cause races. Further protect
# against races by using a ready event and wait on it to be set.
ready.wait()
self._thread = thread
_LOGGER.debug("Started helper thread %s", thread.name)
def stop(self):
"""Stop consuming the stream and shutdown the background thread."""
with self._operational_lock:
self._bidi_rpc.close()
if self._thread is not None:
# Resume the thread to wake it up in case it is sleeping.
self.resume()
# The daemonized thread may itself block, so don't wait
# for it longer than a second.
self._thread.join(1.0)
if self._thread.is_alive(): # pragma: NO COVER
_LOGGER.warning("Background thread did not exit.")
self._thread = None
@property
def is_active(self):
"""bool: True if the background thread is active."""
return self._thread is not None and self._thread.is_alive()
def pause(self):
"""Pauses the response stream.
This does *not* pause the request stream.
"""
with self._wake:
self._paused = True
def resume(self):
"""Resumes the response stream."""
with self._wake:
self._paused = False
self._wake.notify_all()
@property
def is_paused(self):
"""bool: True if the response stream is paused."""
return self._paused
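
Taken together, these helpers are typically wired up by wrapping a
stream-stream callable in a ResumableBidiRpc and handing it to a
BackgroundConsumer. A minimal sketch, assuming a hypothetical stub
``stub.StreamingRpc`` and request type ``example_pb2.StreamingRpcRequest``
(the same placeholder names the docstrings above use)::

    import grpc

    from google.api_core import bidi

    def should_recover(exc):
        # Treat UNAVAILABLE as the usual transient stream error.
        return (
            isinstance(exc, grpc.RpcError)
            and exc.code() == grpc.StatusCode.UNAVAILABLE
        )

    rpc = bidi.ResumableBidiRpc(
        stub.StreamingRpc,  # hypothetical grpc.StreamStreamMultiCallable
        should_recover=should_recover,
        initial_request=example_pb2.StreamingRpcRequest(setting="example"),
        throttle_reopen=True,
    )

    consumer = bidi.BackgroundConsumer(rpc, print)
    consumer.start()
    rpc.send(example_pb2.StreamingRpcRequest(data="example"))
    # ... and when finished:
    consumer.stop()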

View file

@@ -0,0 +1,98 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for providing client information.
Client information is used to send information about the calling client,
such as the library and Python version, to API services.
"""
import platform
import pkg_resources
from google.api_core import version as api_core_version
_PY_VERSION = platform.python_version()
_API_CORE_VERSION = api_core_version.__version__
try:
_GRPC_VERSION = pkg_resources.get_distribution("grpcio").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GRPC_VERSION = None
class ClientInfo(object):
"""Client information used to generate a user-agent for API calls.
This user-agent information is sent along with API calls to allow the
receiving service to do analytics on which versions of Python and Google
libraries are being used.
Args:
python_version (str): The Python interpreter version, for example,
``'2.7.13'``.
grpc_version (Optional[str]): The gRPC library version.
api_core_version (str): The google-api-core library version.
gapic_version (Optional[str]): The version of the gapic-generated client
library, if the library was generated by gapic.
client_library_version (Optional[str]): The version of the client
library, generally used if the client library was not generated
by gapic or if additional functionality was built on top of
a gapic client library.
user_agent (Optional[str]): Prefix to the user agent header. This is
used to supply information such as application name or partner tool.
Recommended format: ``application-or-tool-ID/major.minor.version``.
"""
def __init__(
self,
python_version=_PY_VERSION,
grpc_version=_GRPC_VERSION,
api_core_version=_API_CORE_VERSION,
gapic_version=None,
client_library_version=None,
user_agent=None,
):
self.python_version = python_version
self.grpc_version = grpc_version
self.api_core_version = api_core_version
self.gapic_version = gapic_version
self.client_library_version = client_library_version
self.user_agent = user_agent
def to_user_agent(self):
"""Returns the user-agent string for this client info."""
# Note: the order here is important as the internal metrics system
# expects these items to be in specific locations.
ua = ""
if self.user_agent is not None:
ua += "{user_agent} "
ua += "gl-python/{python_version} "
if self.grpc_version is not None:
ua += "grpc/{grpc_version} "
ua += "gax/{api_core_version} "
if self.gapic_version is not None:
ua += "gapic/{gapic_version} "
if self.client_library_version is not None:
ua += "gccl/{client_library_version} "
return ua.format(**self.__dict__).strip()
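
For illustration, ``to_user_agent`` assembles the fields above in a fixed
order. A short sketch (the version numbers shown are placeholders that
depend on the installed interpreter and libraries)::

    from google.api_core.client_info import ClientInfo

    info = ClientInfo(gapic_version="1.0.0", user_agent="my-app/1.2.3")
    print(info.to_user_agent())
    # e.g. "my-app/1.2.3 gl-python/2.7.13 grpc/1.31.0 gax/1.22.0 gapic/1.0.0"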

View file

@@ -0,0 +1,116 @@
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client options class.
Client options provide a consistent interface for user options to be defined
across clients.
You can pass a client options object to a client.
.. code-block:: python
from google.api_core.client_options import ClientOptions
from google.cloud.vision_v1 import ImageAnnotatorClient
def get_client_cert():
# code to load client certificate and private key.
return client_cert_bytes, client_private_key_bytes
options = ClientOptions(api_endpoint="foo.googleapis.com",
client_cert_source=get_client_cert)
client = ImageAnnotatorClient(client_options=options)
You can also pass a mapping object.
.. code-block:: python
from google.cloud.vision_v1 import ImageAnnotatorClient
client = ImageAnnotatorClient(
client_options={
"api_endpoint": "foo.googleapis.com",
"client_cert_source" : get_client_cert
})
"""
class ClientOptions(object):
"""Client Options used to set options on clients.
Args:
api_endpoint (Optional[str]): The desired API endpoint, e.g.,
compute.googleapis.com
client_cert_source (Optional[Callable[[], (bytes, bytes)]]): A callback
which returns client certificate bytes and private key bytes both in
PEM format. ``client_cert_source`` and ``client_encrypted_cert_source``
are mutually exclusive.
client_encrypted_cert_source (Optional[Callable[[], (str, str, bytes)]]):
A callback which returns client certificate file path, encrypted
private key file path, and the passphrase bytes. ``client_cert_source``
and ``client_encrypted_cert_source`` are mutually exclusive.
quota_project_id (Optional[str]): A project name that a client's
quota belongs to.
credentials_file (Optional[str]): A path to a file storing credentials.
scopes (Optional[Sequence[str]]): OAuth access token override scopes.
Raises:
ValueError: If both ``client_cert_source`` and ``client_encrypted_cert_source``
are provided.
"""
def __init__(
self,
api_endpoint=None,
client_cert_source=None,
client_encrypted_cert_source=None,
quota_project_id=None,
credentials_file=None,
scopes=None,
):
if client_cert_source and client_encrypted_cert_source:
raise ValueError(
"client_cert_source and client_encrypted_cert_source are mutually exclusive"
)
self.api_endpoint = api_endpoint
self.client_cert_source = client_cert_source
self.client_encrypted_cert_source = client_encrypted_cert_source
self.quota_project_id = quota_project_id
self.credentials_file = credentials_file
self.scopes = scopes
def __repr__(self):
return "ClientOptions: " + repr(self.__dict__)
def from_dict(options):
"""Construct a client options object from a mapping object.
Args:
options (six.moves.collections_abc.Mapping): A mapping object with client options.
See the docstring for ClientOptions for details on valid arguments.
"""
client_options = ClientOptions()
for key, value in options.items():
if hasattr(client_options, key):
setattr(client_options, key, value)
else:
raise ValueError("ClientOptions does not accept an option '" + key + "'")
return client_options
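
A short sketch of both construction paths, including the ``ValueError``
raised for an unrecognized key::

    from google.api_core import client_options

    opts = client_options.from_dict({"api_endpoint": "foo.googleapis.com"})
    print(opts.api_endpoint)  # foo.googleapis.com

    try:
        client_options.from_dict({"not_an_option": 1})
    except ValueError as exc:
        print(exc)  # ClientOptions does not accept an option 'not_an_option'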

View file

@@ -0,0 +1,296 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for :mod:`datetime`."""
import calendar
import datetime
import re
import pytz
from google.protobuf import timestamp_pb2
_UTC_EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
_RFC3339_MICROS = "%Y-%m-%dT%H:%M:%S.%fZ"
_RFC3339_NO_FRACTION = "%Y-%m-%dT%H:%M:%S"
# datetime.strptime cannot handle nanosecond precision: parse w/ regex
_RFC3339_NANOS = re.compile(
r"""
(?P<no_fraction>
\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2} # YYYY-MM-DDTHH:MM:SS
)
( # Optional decimal part
\. # decimal point
(?P<nanos>\d{1,9}) # nanoseconds, maybe truncated
)?
Z # Zulu
""",
re.VERBOSE,
)
def utcnow():
"""A :meth:`datetime.datetime.utcnow()` alias to allow mocking in tests."""
return datetime.datetime.utcnow()
def to_milliseconds(value):
"""Convert a zone-aware datetime to milliseconds since the unix epoch.
Args:
value (datetime.datetime): The datetime to convert.
Returns:
int: Milliseconds since the unix epoch.
"""
micros = to_microseconds(value)
return micros // 1000
def from_microseconds(value):
"""Convert timestamp in microseconds since the unix epoch to datetime.
Args:
value (float): The timestamp to convert, in microseconds.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp in
UTC.
"""
return _UTC_EPOCH + datetime.timedelta(microseconds=value)
def to_microseconds(value):
"""Convert a datetime to microseconds since the unix epoch.
Args:
value (datetime.datetime): The datetime to convert.
Returns:
int: Microseconds since the unix epoch.
"""
if not value.tzinfo:
value = value.replace(tzinfo=pytz.utc)
# Regardless of what timezone is on the value, convert it to UTC.
value = value.astimezone(pytz.utc)
# Convert the datetime to a microsecond timestamp.
return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond
def from_iso8601_date(value):
"""Convert a ISO8601 date string to a date.
Args:
value (str): The ISO8601 date string.
Returns:
datetime.date: A date equivalent to the date string.
"""
return datetime.datetime.strptime(value, "%Y-%m-%d").date()
def from_iso8601_time(value):
"""Convert a zoneless ISO8601 time string to a time.
Args:
value (str): The ISO8601 time string.
Returns:
datetime.time: A time equivalent to the time string.
"""
return datetime.datetime.strptime(value, "%H:%M:%S").time()
def from_rfc3339(value):
"""Convert an RFC3339-format timestamp to a native datetime.
Supported formats include those without fractional seconds, or with
any fraction up to nanosecond precision.
.. note::
Python datetimes do not support nanosecond precision; this function
therefore truncates such values to microseconds.
Args:
value (str): The RFC3339 string to convert.
Returns:
datetime.datetime: The datetime object equivalent to the timestamp
in UTC.
Raises:
ValueError: If the timestamp does not match the RFC3339
regular expression.
"""
with_nanos = _RFC3339_NANOS.match(value)
if with_nanos is None:
raise ValueError(
"Timestamp: {!r}, does not match pattern: {!r}".format(
value, _RFC3339_NANOS.pattern
)
)
bare_seconds = datetime.datetime.strptime(
with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION
)
fraction = with_nanos.group("nanos")
if fraction is None:
micros = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
micros = nanos // 1000
return bare_seconds.replace(microsecond=micros, tzinfo=pytz.utc)
from_rfc3339_nanos = from_rfc3339 # from_rfc3339_nanos method was deprecated.
def to_rfc3339(value, ignore_zone=True):
"""Convert a datetime to an RFC3339 timestamp string.
Args:
value (datetime.datetime):
The datetime object to be converted to a string.
ignore_zone (bool): If True, then the timezone (if any) of the
datetime object is ignored and the datetime is treated as UTC.
Returns:
str: The RFC3339 formatted string representing the datetime.
"""
if not ignore_zone and value.tzinfo is not None:
# Convert to UTC and remove the time zone info.
value = value.replace(tzinfo=None) - value.utcoffset()
return value.strftime(_RFC3339_MICROS)
class DatetimeWithNanoseconds(datetime.datetime):
"""Track nanosecond in addition to normal datetime attrs.
Nanosecond can be passed only as a keyword argument.
"""
__slots__ = ("_nanosecond",)
# pylint: disable=arguments-differ
def __new__(cls, *args, **kw):
nanos = kw.pop("nanosecond", 0)
if nanos > 0:
if "microsecond" in kw:
raise TypeError("Specify only one of 'microsecond' or 'nanosecond'")
kw["microsecond"] = nanos // 1000
inst = datetime.datetime.__new__(cls, *args, **kw)
inst._nanosecond = nanos or 0
return inst
# pylint: disable=arguments-differ
@property
def nanosecond(self):
"""Read-only: nanosecond precision."""
return self._nanosecond
def rfc3339(self):
"""Return an RFC3339-compliant timestamp.
Returns:
(str): Timestamp string according to RFC3339 spec.
"""
if self._nanosecond == 0:
return to_rfc3339(self)
nanos = str(self._nanosecond).rjust(9, "0").rstrip("0")
return "{}.{}Z".format(self.strftime(_RFC3339_NO_FRACTION), nanos)
@classmethod
def from_rfc3339(cls, stamp):
"""Parse RFC3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (str): RFC3339 stamp, with up to nanosecond precision
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp string
Raises:
ValueError: if `stamp` does not match the expected format
"""
with_nanos = _RFC3339_NANOS.match(stamp)
if with_nanos is None:
raise ValueError(
"Timestamp: {}, does not match pattern: {}".format(
stamp, _RFC3339_NANOS.pattern
)
)
bare = datetime.datetime.strptime(
with_nanos.group("no_fraction"), _RFC3339_NO_FRACTION
)
fraction = with_nanos.group("nanos")
if fraction is None:
nanos = 0
else:
scale = 9 - len(fraction)
nanos = int(fraction) * (10 ** scale)
return cls(
bare.year,
bare.month,
bare.day,
bare.hour,
bare.minute,
bare.second,
nanosecond=nanos,
tzinfo=pytz.UTC,
)
def timestamp_pb(self):
"""Return a timestamp message.
Returns:
(:class:`~google.protobuf.timestamp_pb2.Timestamp`): Timestamp message
"""
inst = self if self.tzinfo is not None else self.replace(tzinfo=pytz.UTC)
delta = inst - _UTC_EPOCH
seconds = int(delta.total_seconds())
nanos = self._nanosecond or self.microsecond * 1000
return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)
@classmethod
def from_timestamp_pb(cls, stamp):
"""Parse RFC3339-compliant timestamp, preserving nanoseconds.
Args:
stamp (:class:`~google.protobuf.timestamp_pb2.Timestamp`): timestamp message
Returns:
:class:`DatetimeWithNanoseconds`:
an instance matching the timestamp message
"""
microseconds = int(stamp.seconds * 1e6)
bare = from_microseconds(microseconds)
return cls(
bare.year,
bare.month,
bare.day,
bare.hour,
bare.minute,
bare.second,
nanosecond=stamp.nanos,
tzinfo=pytz.UTC,
)
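
A small sketch of the nanosecond-preserving round trip these helpers
provide::

    from google.api_core import datetime_helpers

    stamp = "2020-10-12T12:10:01.123456789Z"
    dt = datetime_helpers.DatetimeWithNanoseconds.from_rfc3339(stamp)
    print(dt.nanosecond)   # 123456789
    print(dt.microsecond)  # 123456 (truncated for the plain datetime fields)
    print(dt.rfc3339())    # 2020-10-12T12:10:01.123456789Z
    print(dt.timestamp_pb().nanos)  # 123456789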

View file

@@ -0,0 +1,474 @@
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions raised by Google API core & clients.
This module provides base classes for all errors raised by libraries based
on :mod:`google.api_core`, including both HTTP and gRPC clients.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import six
from six.moves import http_client
try:
import grpc
except ImportError: # pragma: NO COVER
grpc = None
# Lookup tables for mapping exceptions from HTTP and gRPC transports.
# Populated by _APICallErrorMeta
_HTTP_CODE_TO_EXCEPTION = {}
_GRPC_CODE_TO_EXCEPTION = {}
class GoogleAPIError(Exception):
"""Base class for all exceptions raised by Google API Clients."""
pass
class DuplicateCredentialArgs(GoogleAPIError):
"""Raised when multiple credentials are passed."""
pass
@six.python_2_unicode_compatible
class RetryError(GoogleAPIError):
"""Raised when a function has exhausted all of its available retries.
Args:
message (str): The exception message.
cause (Exception): The last exception raised when retrying the
function.
"""
def __init__(self, message, cause):
super(RetryError, self).__init__(message)
self.message = message
self._cause = cause
@property
def cause(self):
"""The last exception raised when retrying the function."""
return self._cause
def __str__(self):
return "{}, last exception: {}".format(self.message, self.cause)
class _GoogleAPICallErrorMeta(type):
"""Metaclass for registering GoogleAPICallError subclasses."""
def __new__(mcs, name, bases, class_dict):
cls = type.__new__(mcs, name, bases, class_dict)
if cls.code is not None:
_HTTP_CODE_TO_EXCEPTION.setdefault(cls.code, cls)
if cls.grpc_status_code is not None:
_GRPC_CODE_TO_EXCEPTION.setdefault(cls.grpc_status_code, cls)
return cls
@six.python_2_unicode_compatible
@six.add_metaclass(_GoogleAPICallErrorMeta)
class GoogleAPICallError(GoogleAPIError):
"""Base class for exceptions raised by calling API methods.
Args:
message (str): The exception message.
errors (Sequence[Any]): An optional list of error details.
response (Union[requests.Request, grpc.Call]): The response or
gRPC call metadata.
"""
code = None
"""Optional[int]: The HTTP status code associated with this error.
This may be ``None`` if the exception does not have a direct mapping
to an HTTP error.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
"""
grpc_status_code = None
"""Optional[grpc.StatusCode]: The gRPC status code associated with this
error.
This may be ``None`` if the exception does not match up to a gRPC error.
"""
def __init__(self, message, errors=(), response=None):
super(GoogleAPICallError, self).__init__(message)
self.message = message
"""str: The exception message."""
self._errors = errors
self._response = response
def __str__(self):
return "{} {}".format(self.code, self.message)
@property
def errors(self):
"""Detailed error information.
Returns:
Sequence[Any]: A list of additional error details.
"""
return list(self._errors)
@property
def response(self):
"""Optional[Union[requests.Request, grpc.Call]]: The response or
gRPC call metadata."""
return self._response
class Redirection(GoogleAPICallError):
"""Base class for for all redirection (HTTP 3xx) responses."""
class MovedPermanently(Redirection):
"""Exception mapping a ``301 Moved Permanently`` response."""
code = http_client.MOVED_PERMANENTLY
class NotModified(Redirection):
"""Exception mapping a ``304 Not Modified`` response."""
code = http_client.NOT_MODIFIED
class TemporaryRedirect(Redirection):
"""Exception mapping a ``307 Temporary Redirect`` response."""
code = http_client.TEMPORARY_REDIRECT
class ResumeIncomplete(Redirection):
"""Exception mapping a ``308 Resume Incomplete`` response.
.. note:: :attr:`http_client.PERMANENT_REDIRECT` is ``308``, but Google
APIs differ in their use of this status code.
"""
code = 308
class ClientError(GoogleAPICallError):
"""Base class for all client error (HTTP 4xx) responses."""
class BadRequest(ClientError):
"""Exception mapping a ``400 Bad Request`` response."""
code = http_client.BAD_REQUEST
class InvalidArgument(BadRequest):
"""Exception mapping a :attr:`grpc.StatusCode.INVALID_ARGUMENT` error."""
grpc_status_code = grpc.StatusCode.INVALID_ARGUMENT if grpc is not None else None
class FailedPrecondition(BadRequest):
"""Exception mapping a :attr:`grpc.StatusCode.FAILED_PRECONDITION`
error."""
grpc_status_code = grpc.StatusCode.FAILED_PRECONDITION if grpc is not None else None
class OutOfRange(BadRequest):
"""Exception mapping a :attr:`grpc.StatusCode.OUT_OF_RANGE` error."""
grpc_status_code = grpc.StatusCode.OUT_OF_RANGE if grpc is not None else None
class Unauthorized(ClientError):
"""Exception mapping a ``401 Unauthorized`` response."""
code = http_client.UNAUTHORIZED
class Unauthenticated(Unauthorized):
"""Exception mapping a :attr:`grpc.StatusCode.UNAUTHENTICATED` error."""
grpc_status_code = grpc.StatusCode.UNAUTHENTICATED if grpc is not None else None
class Forbidden(ClientError):
"""Exception mapping a ``403 Forbidden`` response."""
code = http_client.FORBIDDEN
class PermissionDenied(Forbidden):
"""Exception mapping a :attr:`grpc.StatusCode.PERMISSION_DENIED` error."""
grpc_status_code = grpc.StatusCode.PERMISSION_DENIED if grpc is not None else None
class NotFound(ClientError):
"""Exception mapping a ``404 Not Found`` response or a
:attr:`grpc.StatusCode.NOT_FOUND` error."""
code = http_client.NOT_FOUND
grpc_status_code = grpc.StatusCode.NOT_FOUND if grpc is not None else None
class MethodNotAllowed(ClientError):
"""Exception mapping a ``405 Method Not Allowed`` response."""
code = http_client.METHOD_NOT_ALLOWED
class Conflict(ClientError):
"""Exception mapping a ``409 Conflict`` response."""
code = http_client.CONFLICT
class AlreadyExists(Conflict):
"""Exception mapping a :attr:`grpc.StatusCode.ALREADY_EXISTS` error."""
grpc_status_code = grpc.StatusCode.ALREADY_EXISTS if grpc is not None else None
class Aborted(Conflict):
"""Exception mapping a :attr:`grpc.StatusCode.ABORTED` error."""
grpc_status_code = grpc.StatusCode.ABORTED if grpc is not None else None
class LengthRequired(ClientError):
"""Exception mapping a ``411 Length Required`` response."""
code = http_client.LENGTH_REQUIRED
class PreconditionFailed(ClientError):
"""Exception mapping a ``412 Precondition Failed`` response."""
code = http_client.PRECONDITION_FAILED
class RequestRangeNotSatisfiable(ClientError):
"""Exception mapping a ``416 Request Range Not Satisfiable`` response."""
code = http_client.REQUESTED_RANGE_NOT_SATISFIABLE
class TooManyRequests(ClientError):
"""Exception mapping a ``429 Too Many Requests`` response."""
# http_client does not define a constant for this in Python 2.
code = 429
class ResourceExhausted(TooManyRequests):
"""Exception mapping a :attr:`grpc.StatusCode.RESOURCE_EXHAUSTED` error."""
grpc_status_code = grpc.StatusCode.RESOURCE_EXHAUSTED if grpc is not None else None
class Cancelled(ClientError):
"""Exception mapping a :attr:`grpc.StatusCode.CANCELLED` error."""
# This maps to HTTP status code 499. See
# https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
code = 499
grpc_status_code = grpc.StatusCode.CANCELLED if grpc is not None else None
class ServerError(GoogleAPICallError):
"""Base for 5xx responses."""
class InternalServerError(ServerError):
"""Exception mapping a ``500 Internal Server Error`` response. or a
:attr:`grpc.StatusCode.INTERNAL` error."""
code = http_client.INTERNAL_SERVER_ERROR
grpc_status_code = grpc.StatusCode.INTERNAL if grpc is not None else None
class Unknown(ServerError):
"""Exception mapping a :attr:`grpc.StatusCode.UNKNOWN` error."""
grpc_status_code = grpc.StatusCode.UNKNOWN if grpc is not None else None
class DataLoss(ServerError):
"""Exception mapping a :attr:`grpc.StatusCode.DATA_LOSS` error."""
grpc_status_code = grpc.StatusCode.DATA_LOSS if grpc is not None else None
class MethodNotImplemented(ServerError):
"""Exception mapping a ``501 Not Implemented`` response or a
:attr:`grpc.StatusCode.UNIMPLEMENTED` error."""
code = http_client.NOT_IMPLEMENTED
grpc_status_code = grpc.StatusCode.UNIMPLEMENTED if grpc is not None else None
class BadGateway(ServerError):
"""Exception mapping a ``502 Bad Gateway`` response."""
code = http_client.BAD_GATEWAY
class ServiceUnavailable(ServerError):
"""Exception mapping a ``503 Service Unavailable`` response or a
:attr:`grpc.StatusCode.UNAVAILABLE` error."""
code = http_client.SERVICE_UNAVAILABLE
grpc_status_code = grpc.StatusCode.UNAVAILABLE if grpc is not None else None
class GatewayTimeout(ServerError):
"""Exception mapping a ``504 Gateway Timeout`` response."""
code = http_client.GATEWAY_TIMEOUT
class DeadlineExceeded(GatewayTimeout):
"""Exception mapping a :attr:`grpc.StatusCode.DEADLINE_EXCEEDED` error."""
grpc_status_code = grpc.StatusCode.DEADLINE_EXCEEDED if grpc is not None else None
def exception_class_for_http_status(status_code):
"""Return the exception class for a specific HTTP status code.
Args:
status_code (int): The HTTP status code.
Returns:
:func:`type`: the appropriate subclass of :class:`GoogleAPICallError`.
"""
return _HTTP_CODE_TO_EXCEPTION.get(status_code, GoogleAPICallError)
def from_http_status(status_code, message, **kwargs):
"""Create a :class:`GoogleAPICallError` from an HTTP status code.
Args:
status_code (int): The HTTP status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
error_class = exception_class_for_http_status(status_code)
error = error_class(message, **kwargs)
if error.code is None:
error.code = status_code
return error
def from_http_response(response):
"""Create a :class:`GoogleAPICallError` from a :class:`requests.Response`.
Args:
response (requests.Response): The HTTP response.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`, with the message and errors populated
from the response.
"""
try:
payload = response.json()
except ValueError:
payload = {"error": {"message": response.text or "unknown error"}}
error_message = payload.get("error", {}).get("message", "unknown error")
errors = payload.get("error", {}).get("errors", ())
message = "{method} {url}: {error}".format(
method=response.request.method, url=response.request.url, error=error_message
)
exception = from_http_status(
response.status_code, message, errors=errors, response=response
)
return exception
def exception_class_for_grpc_status(status_code):
"""Return the exception class for a specific :class:`grpc.StatusCode`.
Args:
status_code (grpc.StatusCode): The gRPC status code.
Returns:
:func:`type`: the appropriate subclass of :class:`GoogleAPICallError`.
"""
return _GRPC_CODE_TO_EXCEPTION.get(status_code, GoogleAPICallError)
def from_grpc_status(status_code, message, **kwargs):
"""Create a :class:`GoogleAPICallError` from a :class:`grpc.StatusCode`.
Args:
status_code (grpc.StatusCode): The gRPC status code.
message (str): The exception message.
kwargs: Additional arguments passed to the :class:`GoogleAPICallError`
constructor.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
error_class = exception_class_for_grpc_status(status_code)
error = error_class(message, **kwargs)
if error.grpc_status_code is None:
error.grpc_status_code = status_code
return error
def _is_informative_grpc_error(rpc_exc):
return hasattr(rpc_exc, "code") and hasattr(rpc_exc, "details")
def from_grpc_error(rpc_exc):
"""Create a :class:`GoogleAPICallError` from a :class:`grpc.RpcError`.
Args:
rpc_exc (grpc.RpcError): The gRPC error.
Returns:
GoogleAPICallError: An instance of the appropriate subclass of
:class:`GoogleAPICallError`.
"""
# NOTE(lidiz) All gRPC errors share the parent class grpc.RpcError.
# However, checking for grpc.RpcError breaks backward compatibility.
if isinstance(rpc_exc, grpc.Call) or _is_informative_grpc_error(rpc_exc):
return from_grpc_status(
rpc_exc.code(), rpc_exc.details(), errors=(rpc_exc,), response=rpc_exc
)
else:
return GoogleAPICallError(str(rpc_exc), errors=(rpc_exc,), response=rpc_exc)
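
A sketch of how the lookup tables resolve status codes to exception
classes::

    from google.api_core import exceptions

    print(exceptions.exception_class_for_http_status(404))  # NotFound
    error = exceptions.from_http_status(404, "resource missing")
    assert isinstance(error, exceptions.NotFound)
    assert error.code == 404

    # Codes with no registered subclass fall back to the base class.
    print(exceptions.exception_class_for_http_status(418))  # GoogleAPICallError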

View file

@@ -0,0 +1,19 @@
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Futures for dealing with asynchronous operations."""
from google.api_core.future.base import Future
__all__ = ["Future"]

View file

@@ -0,0 +1,39 @@
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Private helpers for futures."""
import logging
import threading
_LOGGER = logging.getLogger(__name__)
def start_daemon_thread(*args, **kwargs):
"""Starts a thread and marks it as a daemon thread."""
thread = threading.Thread(*args, **kwargs)
thread.daemon = True
thread.start()
return thread
def safe_invoke_callback(callback, *args, **kwargs):
"""Invoke a callback, swallowing and logging any exceptions."""
# pylint: disable=bare-except
# We intentionally want to swallow all exceptions.
try:
return callback(*args, **kwargs)
except Exception:
_LOGGER.exception("Error while executing Future callback.")
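
For illustration, a sketch of both helpers::

    from google.api_core.future import _helpers

    thread = _helpers.start_daemon_thread(target=lambda: print("in a daemon"))
    thread.join()

    def bad_callback(result):
        raise RuntimeError("boom")

    # The exception is logged via _LOGGER.exception instead of propagating.
    _helpers.safe_invoke_callback(bad_callback, "some result")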

View file

@@ -0,0 +1,157 @@
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AsyncIO implementation of the abstract base Future class."""
import asyncio
from google.api_core import exceptions
from google.api_core import retry
from google.api_core import retry_async
from google.api_core.future import base
class _OperationNotComplete(Exception):
"""Private exception used for polling via retry."""
pass
RETRY_PREDICATE = retry.if_exception_type(
_OperationNotComplete,
exceptions.TooManyRequests,
exceptions.InternalServerError,
exceptions.BadGateway,
)
DEFAULT_RETRY = retry_async.AsyncRetry(predicate=RETRY_PREDICATE)
class AsyncFuture(base.Future):
"""A Future that polls peer service to self-update.
The :meth:`done` method should be implemented by subclasses. The polling
behavior will repeatedly call ``done`` until it returns True.
.. note:: Privacy here is intended to prevent the final class from
overexposing, not to prevent subclasses from accessing methods.
Args:
retry (google.api_core.retry.Retry): The retry configuration used
when polling. This can be used to control how often :meth:`done`
is polled. Regardless of the retry's ``deadline``, it will be
overridden by the ``timeout`` argument to :meth:`result`.
"""
def __init__(self, retry=DEFAULT_RETRY):
super().__init__()
self._retry = retry
self._future = asyncio.get_event_loop().create_future()
self._background_task = None
async def done(self, retry=DEFAULT_RETRY):
"""Checks to see if the operation is complete.
Args:
retry (google.api_core.retry.Retry): (Optional) How to retry the RPC.
Returns:
bool: True if the operation is complete, False otherwise.
"""
# pylint: disable=redundant-returns-doc, missing-raises-doc
raise NotImplementedError()
async def _done_or_raise(self):
"""Check if the future is done and raise if it's not."""
result = await self.done()
if not result:
raise _OperationNotComplete()
async def running(self):
"""True if the operation is currently running."""
result = await self.done()
return not result
async def _blocking_poll(self, timeout=None):
"""Poll and await for the Future to be resolved.
Args:
timeout (int):
How long (in seconds) to wait for the operation to complete.
If None, wait indefinitely.
"""
if self._future.done():
return
retry_ = self._retry.with_deadline(timeout)
try:
await retry_(self._done_or_raise)()
except exceptions.RetryError:
raise asyncio.TimeoutError(
"Operation did not complete within the designated " "timeout."
)
async def result(self, timeout=None):
"""Get the result of the operation.
Args:
timeout (int):
How long (in seconds) to wait for the operation to complete.
If None, wait indefinitely.
Returns:
google.protobuf.Message: The Operation's result.
Raises:
google.api_core.GoogleAPICallError: If the operation errors or if
the timeout is reached before the operation completes.
"""
await self._blocking_poll(timeout=timeout)
return self._future.result()
async def exception(self, timeout=None):
"""Get the exception from the operation.
Args:
timeout (int): How long to wait for the operation to complete.
If None, wait indefinitely.
Returns:
Optional[google.api_core.GoogleAPICallError]: The operation's
error.
"""
await self._blocking_poll(timeout=timeout)
return self._future.exception()
def add_done_callback(self, fn):
"""Add a callback to be executed when the operation is complete.
If the operation is completed, the callback will be scheduled onto the
event loop. Otherwise, the callback will be stored and invoked when the
future is done.
Args:
fn (Callable[Future]): The callback to execute when the operation
is complete.
"""
if self._background_task is None:
self._background_task = asyncio.get_event_loop().create_task(self._blocking_poll())
self._future.add_done_callback(fn)
def set_result(self, result):
"""Set the Future's result."""
self._future.set_result(result)
def set_exception(self, exception):
"""Set the Future's exception."""
self._future.set_exception(exception)
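
``AsyncFuture`` is abstract; subclasses supply :meth:`done`. A toy sketch
(not a real operation) in which ``done`` simply reports whether a result
has been set on the wrapped asyncio future::

    import asyncio

    from google.api_core.future import async_future

    class ToyFuture(async_future.AsyncFuture):
        async def done(self, retry=async_future.DEFAULT_RETRY):
            # A real subclass would poll a remote service here.
            return self._future.done()

    async def main():
        future = ToyFuture()
        asyncio.get_event_loop().call_later(0.1, future.set_result, "payload")
        print(await future.result())  # payload

    asyncio.get_event_loop().run_until_complete(main())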

View file

@@ -0,0 +1,67 @@
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract and helper bases for Future implementations."""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Future(object):
# pylint: disable=missing-docstring
# We inherit the interfaces here from concurrent.futures.
"""Future interface.
This interface is based on :class:`concurrent.futures.Future`.
"""
@abc.abstractmethod
def cancel(self):
raise NotImplementedError()
@abc.abstractmethod
def cancelled(self):
raise NotImplementedError()
@abc.abstractmethod
def running(self):
raise NotImplementedError()
@abc.abstractmethod
def done(self):
raise NotImplementedError()
@abc.abstractmethod
def result(self, timeout=None):
raise NotImplementedError()
@abc.abstractmethod
def exception(self, timeout=None):
raise NotImplementedError()
@abc.abstractmethod
def add_done_callback(self, fn):
# pylint: disable=invalid-name
raise NotImplementedError()
@abc.abstractmethod
def set_result(self, result):
raise NotImplementedError()
@abc.abstractmethod
def set_exception(self, exception):
raise NotImplementedError()

View file

@ -0,0 +1,186 @@
# Copyright 2017, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract and helper bases for Future implementations."""
import abc
import concurrent.futures
from google.api_core import exceptions
from google.api_core import retry
from google.api_core.future import _helpers
from google.api_core.future import base
class _OperationNotComplete(Exception):
"""Private exception used for polling via retry."""
pass
RETRY_PREDICATE = retry.if_exception_type(
_OperationNotComplete,
exceptions.TooManyRequests,
exceptions.InternalServerError,
exceptions.BadGateway,
)
DEFAULT_RETRY = retry.Retry(predicate=RETRY_PREDICATE)
class PollingFuture(base.Future):
"""A Future that needs to poll some service to check its status.
The :meth:`done` method should be implemented by subclasses. The polling
behavior will repeatedly call ``done`` until it returns True.
    .. note:: Privacy here is intended to prevent the final class from
overexposing, not to prevent subclasses from accessing methods.
Args:
retry (google.api_core.retry.Retry): The retry configuration used
when polling. This can be used to control how often :meth:`done`
is polled. Regardless of the retry's ``deadline``, it will be
overridden by the ``timeout`` argument to :meth:`result`.
"""
def __init__(self, retry=DEFAULT_RETRY):
super(PollingFuture, self).__init__()
self._retry = retry
self._result = None
self._exception = None
self._result_set = False
"""bool: Set to True when the result has been set via set_result or
set_exception."""
self._polling_thread = None
self._done_callbacks = []
@abc.abstractmethod
def done(self, retry=DEFAULT_RETRY):
"""Checks to see if the operation is complete.
Args:
retry (google.api_core.retry.Retry): (Optional) How to retry the RPC.
Returns:
bool: True if the operation is complete, False otherwise.
"""
# pylint: disable=redundant-returns-doc, missing-raises-doc
raise NotImplementedError()
def _done_or_raise(self):
"""Check if the future is done and raise if it's not."""
if not self.done():
raise _OperationNotComplete()
def running(self):
"""True if the operation is currently running."""
return not self.done()
def _blocking_poll(self, timeout=None):
"""Poll and wait for the Future to be resolved.
Args:
timeout (int):
How long (in seconds) to wait for the operation to complete.
If None, wait indefinitely.
"""
if self._result_set:
return
retry_ = self._retry.with_deadline(timeout)
try:
retry_(self._done_or_raise)()
except exceptions.RetryError:
            raise concurrent.futures.TimeoutError(
                "Operation did not complete within the designated timeout."
            )
def result(self, timeout=None):
"""Get the result of the operation, blocking if necessary.
Args:
timeout (int):
How long (in seconds) to wait for the operation to complete.
If None, wait indefinitely.
Returns:
google.protobuf.Message: The Operation's result.
Raises:
google.api_core.GoogleAPICallError: If the operation errors or if
the timeout is reached before the operation completes.
"""
self._blocking_poll(timeout=timeout)
if self._exception is not None:
# pylint: disable=raising-bad-type
# Pylint doesn't recognize that this is valid in this case.
raise self._exception
return self._result
def exception(self, timeout=None):
"""Get the exception from the operation, blocking if necessary.
Args:
timeout (int): How long to wait for the operation to complete.
If None, wait indefinitely.
Returns:
Optional[google.api_core.GoogleAPICallError]: The operation's
error.
"""
self._blocking_poll(timeout=timeout)
return self._exception
def add_done_callback(self, fn):
"""Add a callback to be executed when the operation is complete.
If the operation is not already complete, this will start a helper
thread to poll for the status of the operation in the background.
Args:
fn (Callable[Future]): The callback to execute when the operation
is complete.
"""
if self._result_set:
_helpers.safe_invoke_callback(fn, self)
return
self._done_callbacks.append(fn)
if self._polling_thread is None:
# The polling thread will exit on its own as soon as the operation
# is done.
self._polling_thread = _helpers.start_daemon_thread(
target=self._blocking_poll
)
def _invoke_callbacks(self, *args, **kwargs):
"""Invoke all done callbacks."""
for callback in self._done_callbacks:
_helpers.safe_invoke_callback(callback, *args, **kwargs)
def set_result(self, result):
"""Set the Future's result."""
self._result = result
self._result_set = True
self._invoke_callbacks(self)
def set_exception(self, exception):
"""Set the Future's exception."""
self._exception = exception
self._result_set = True
self._invoke_callbacks(self)
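# Illustrative sketch: a minimal PollingFuture subclass, assuming a
# hypothetical ``check_status`` callable that returns a
# (done, result, error) tuple for the underlying operation.
class _ExamplePollingFuture(PollingFuture):
    def __init__(self, check_status, retry=DEFAULT_RETRY):
        super(_ExamplePollingFuture, self).__init__(retry=retry)
        self._check_status = check_status
    def done(self, retry=DEFAULT_RETRY):
        complete, result, error = self._check_status()
        if complete and not self._result_set:
            # Resolve the future so blocked callers return and callbacks fire.
            if error is not None:
                self.set_exception(error)
            else:
                self.set_result(result)
        return complete
    def cancel(self):
        # This sketch does not support cancellation.
        return False
    def cancelled(self):
        return False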

View file

@ -0,0 +1,28 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from google.api_core.gapic_v1 import client_info
from google.api_core.gapic_v1 import config
from google.api_core.gapic_v1 import method
from google.api_core.gapic_v1 import routing_header
__all__ = ["client_info", "config", "method", "routing_header"]
if sys.version_info >= (3, 6):
from google.api_core.gapic_v1 import config_async # noqa: F401
from google.api_core.gapic_v1 import method_async # noqa: F401
__all__.append("config_async")
__all__.append("method_async")

View file

@ -0,0 +1,55 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for providing client information.
Client information is used to send information about the calling client,
such as the library and Python version, to API services.
"""
from google.api_core import client_info
METRICS_METADATA_KEY = "x-goog-api-client"
class ClientInfo(client_info.ClientInfo):
"""Client information used to generate a user-agent for API calls.
This user-agent information is sent along with API calls to allow the
receiving service to do analytics on which versions of Python and Google
libraries are being used.
Args:
python_version (str): The Python interpreter version, for example,
``'2.7.13'``.
grpc_version (Optional[str]): The gRPC library version.
api_core_version (str): The google-api-core library version.
        gapic_version (Optional[str]): The version of the gapic-generated
            client library, if the library was generated by gapic.
client_library_version (Optional[str]): The version of the client
library, generally used if the client library was not generated
by gapic or if additional functionality was built on top of
a gapic client library.
user_agent (Optional[str]): Prefix to the user agent header. This is
used to supply information such as application name or partner tool.
Recommended format: ``application-or-tool-ID/major.minor.version``.
"""
def to_grpc_metadata(self):
"""Returns the gRPC metadata for this client info."""
return (METRICS_METADATA_KEY, self.to_user_agent())
DEFAULT_CLIENT_INFO = ClientInfo()
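# Illustrative sketch: the metadata tuple is simply appended to a call's
# metadata; the exact user-agent string varies with the environment.
_example_metadata = [DEFAULT_CLIENT_INFO.to_grpc_metadata()]
# e.g. [("x-goog-api-client", "gl-python/3.8.2 grpc/1.31.0 gax/1.22.2")]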

View file

@ -0,0 +1,169 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for loading gapic configuration data.
The Google API generator creates supplementary configuration for each RPC
method to tell the client library how to deal with retries and timeouts.
"""
import collections
import grpc
import six
from google.api_core import exceptions
from google.api_core import retry
from google.api_core import timeout
_MILLIS_PER_SECOND = 1000.0
def _exception_class_for_grpc_status_name(name):
"""Returns the Google API exception class for a gRPC error code name.
Args:
name (str): The name of the gRPC status code, for example,
``UNAVAILABLE``.
Returns:
:func:`type`: The appropriate subclass of
:class:`google.api_core.exceptions.GoogleAPICallError`.
"""
return exceptions.exception_class_for_grpc_status(getattr(grpc.StatusCode, name))
def _retry_from_retry_config(retry_params, retry_codes, retry_impl=retry.Retry):
"""Creates a Retry object given a gapic retry configuration.
Args:
retry_params (dict): The retry parameter values, for example::
{
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
"initial_rpc_timeout_millis": 120000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 120000,
"total_timeout_millis": 600000
}
retry_codes (sequence[str]): The list of retryable gRPC error code
names.
Returns:
google.api_core.retry.Retry: The default retry object for the method.
"""
exception_classes = [
_exception_class_for_grpc_status_name(code) for code in retry_codes
]
return retry_impl(
retry.if_exception_type(*exception_classes),
initial=(retry_params["initial_retry_delay_millis"] / _MILLIS_PER_SECOND),
maximum=(retry_params["max_retry_delay_millis"] / _MILLIS_PER_SECOND),
multiplier=retry_params["retry_delay_multiplier"],
deadline=retry_params["total_timeout_millis"] / _MILLIS_PER_SECOND,
)
def _timeout_from_retry_config(retry_params):
"""Creates a ExponentialTimeout object given a gapic retry configuration.
Args:
retry_params (dict): The retry parameter values, for example::
{
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
"initial_rpc_timeout_millis": 120000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 120000,
"total_timeout_millis": 600000
}
Returns:
google.api_core.retry.ExponentialTimeout: The default time object for
the method.
"""
return timeout.ExponentialTimeout(
initial=(retry_params["initial_rpc_timeout_millis"] / _MILLIS_PER_SECOND),
maximum=(retry_params["max_rpc_timeout_millis"] / _MILLIS_PER_SECOND),
multiplier=retry_params["rpc_timeout_multiplier"],
deadline=(retry_params["total_timeout_millis"] / _MILLIS_PER_SECOND),
)
MethodConfig = collections.namedtuple("MethodConfig", ["retry", "timeout"])
def parse_method_configs(interface_config, retry_impl=retry.Retry):
"""Creates default retry and timeout objects for each method in a gapic
interface config.
Args:
interface_config (Mapping): The interface config section of the full
gapic library config. For example, If the full configuration has
an interface named ``google.example.v1.ExampleService`` you would
pass in just that interface's configuration, for example
``gapic_config['interfaces']['google.example.v1.ExampleService']``.
retry_impl (Callable): The constructor that creates a retry decorator
that will be applied to the method based on method configs.
Returns:
Mapping[str, MethodConfig]: A mapping of RPC method names to their
configuration.
"""
# Grab all the retry codes
retry_codes_map = {
name: retry_codes
for name, retry_codes in six.iteritems(interface_config.get("retry_codes", {}))
}
# Grab all of the retry params
retry_params_map = {
name: retry_params
for name, retry_params in six.iteritems(
interface_config.get("retry_params", {})
)
}
# Iterate through all the API methods and create a flat MethodConfig
# instance for each one.
method_configs = {}
for method_name, method_params in six.iteritems(
interface_config.get("methods", {})
):
retry_params_name = method_params.get("retry_params_name")
if retry_params_name is not None:
retry_params = retry_params_map[retry_params_name]
retry_ = _retry_from_retry_config(
retry_params, retry_codes_map[method_params["retry_codes_name"]], retry_impl
)
timeout_ = _timeout_from_retry_config(retry_params)
# No retry config, so this is a non-retryable method.
else:
retry_ = None
timeout_ = timeout.ConstantTimeout(
method_params["timeout_millis"] / _MILLIS_PER_SECOND
)
method_configs[method_name] = MethodConfig(retry=retry_, timeout=timeout_)
return method_configs
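# Illustrative sketch: parsing a hand-written interface config shaped like
# the docstring example above; the method and config names are hypothetical.
_EXAMPLE_INTERFACE_CONFIG = {
    "retry_codes": {"idempotent": ["UNAVAILABLE", "DEADLINE_EXCEEDED"]},
    "retry_params": {
        "default": {
            "initial_retry_delay_millis": 1000,
            "retry_delay_multiplier": 2.5,
            "max_retry_delay_millis": 120000,
            "initial_rpc_timeout_millis": 120000,
            "rpc_timeout_multiplier": 1.0,
            "max_rpc_timeout_millis": 120000,
            "total_timeout_millis": 600000,
        }
    },
    "methods": {
        "GetTopic": {
            "retry_codes_name": "idempotent",
            "retry_params_name": "default",
        },
        "DeleteTopic": {"timeout_millis": 60000},
    },
}
# parse_method_configs(_EXAMPLE_INTERFACE_CONFIG) yields
# {"GetTopic": MethodConfig(retry=Retry(...), timeout=ExponentialTimeout(...)),
#  "DeleteTopic": MethodConfig(retry=None, timeout=ConstantTimeout(60.0))}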

View file

@ -0,0 +1,42 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AsyncIO helpers for loading gapic configuration data.
The Google API generator creates supplementary configuration for each RPC
method to tell the client library how to deal with retries and timeouts.
"""
from google.api_core import retry_async
from google.api_core.gapic_v1 import config
from google.api_core.gapic_v1.config import MethodConfig # noqa: F401
def parse_method_configs(interface_config):
"""Creates default retry and timeout objects for each method in a gapic
interface config with AsyncIO semantics.
Args:
interface_config (Mapping): The interface config section of the full
gapic library config. For example, If the full configuration has
an interface named ``google.example.v1.ExampleService`` you would
pass in just that interface's configuration, for example
``gapic_config['interfaces']['google.example.v1.ExampleService']``.
Returns:
Mapping[str, MethodConfig]: A mapping of RPC method names to their
configuration.
"""
return config.parse_method_configs(
interface_config,
retry_impl=retry_async.AsyncRetry)

View file

@ -0,0 +1,244 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for wrapping low-level gRPC methods with common functionality.
This is used by gapic clients to provide common error mapping, retry, timeout,
pagination, and long-running operations to gRPC methods.
"""
from google.api_core import general_helpers
from google.api_core import grpc_helpers
from google.api_core import timeout
from google.api_core.gapic_v1 import client_info
USE_DEFAULT_METADATA = object()
DEFAULT = object()
"""Sentinel value indicating that a retry or timeout argument was unspecified,
so the default should be used."""
def _is_not_none_or_false(value):
return value is not None and value is not False
def _apply_decorators(func, decorators):
"""Apply a list of decorators to a given function.
``decorators`` may contain items that are ``None`` or ``False`` which will
be ignored.
"""
decorators = filter(_is_not_none_or_false, reversed(decorators))
for decorator in decorators:
func = decorator(func)
return func
def _determine_timeout(default_timeout, specified_timeout, retry):
"""Determines how timeout should be applied to a wrapped method.
Args:
default_timeout (Optional[Timeout]): The default timeout specified
at method creation time.
specified_timeout (Optional[Timeout]): The timeout specified at
invocation time. If :attr:`DEFAULT`, this will be set to
the ``default_timeout``.
retry (Optional[Retry]): The retry specified at invocation time.
Returns:
Optional[Timeout]: The timeout to apply to the method or ``None``.
"""
# If timeout is specified as a number instead of a Timeout instance,
# convert it to a ConstantTimeout.
if isinstance(specified_timeout, (int, float)):
specified_timeout = timeout.ConstantTimeout(specified_timeout)
if isinstance(default_timeout, (int, float)):
default_timeout = timeout.ConstantTimeout(default_timeout)
if specified_timeout is DEFAULT:
specified_timeout = default_timeout
if specified_timeout is default_timeout:
# If timeout is the default and the default timeout is exponential and
# a non-default retry is specified, make sure the timeout's deadline
# matches the retry's. This handles the case where the user leaves
# the timeout default but specifies a lower deadline via the retry.
if (
retry
and retry is not DEFAULT
and isinstance(default_timeout, timeout.ExponentialTimeout)
):
return default_timeout.with_deadline(retry._deadline)
else:
return default_timeout
return specified_timeout
class _GapicCallable(object):
"""Callable that applies retry, timeout, and metadata logic.
Args:
target (Callable): The low-level RPC method.
retry (google.api_core.retry.Retry): The default retry for the
callable. If ``None``, this callable will not retry by default
timeout (google.api_core.timeout.Timeout): The default timeout
for the callable. If ``None``, this callable will not specify
a timeout argument to the low-level RPC method by default.
metadata (Sequence[Tuple[str, str]]): Additional metadata that is
provided to the RPC method on every invocation. This is merged with
any metadata specified during invocation. If ``None``, no
additional metadata will be passed to the RPC method.
"""
def __init__(self, target, retry, timeout, metadata=None):
self._target = target
self._retry = retry
self._timeout = timeout
self._metadata = metadata
def __call__(self, *args, **kwargs):
"""Invoke the low-level RPC with retry, timeout, and metadata."""
# Note: Due to Python 2 lacking keyword-only arguments we use kwargs to
# extract the retry and timeout params.
timeout_ = _determine_timeout(
self._timeout,
kwargs.pop("timeout", self._timeout),
            # Use only the invocation-specified retry here, as we only want
            # to adjust the timeout deadline if the *user* specified a
            # different retry.
kwargs.get("retry", None),
)
retry = kwargs.pop("retry", self._retry)
if retry is DEFAULT:
retry = self._retry
# Apply all applicable decorators.
wrapped_func = _apply_decorators(self._target, [retry, timeout_])
# Add the user agent metadata to the call.
if self._metadata is not None:
metadata = kwargs.get("metadata", [])
# Due to the nature of invocation, None should be treated the same
# as not specified.
if metadata is None:
metadata = []
metadata = list(metadata)
metadata.extend(self._metadata)
kwargs["metadata"] = metadata
return wrapped_func(*args, **kwargs)
def wrap_method(
func,
default_retry=None,
default_timeout=None,
client_info=client_info.DEFAULT_CLIENT_INFO,
):
"""Wrap an RPC method with common behavior.
    This applies common error wrapping, retry, and timeout behavior to a function.
The wrapped function will take optional ``retry`` and ``timeout``
arguments.
For example::
import google.api_core.gapic_v1.method
from google.api_core import retry
from google.api_core import timeout
# The original RPC method.
def get_topic(name, timeout=None):
request = publisher_v2.GetTopicRequest(name=name)
return publisher_stub.GetTopic(request, timeout=timeout)
default_retry = retry.Retry(deadline=60)
default_timeout = timeout.Timeout(deadline=60)
wrapped_get_topic = google.api_core.gapic_v1.method.wrap_method(
get_topic, default_retry)
# Execute get_topic with default retry and timeout:
response = wrapped_get_topic()
        # Execute get_topic without doing any retrying but with the default
# timeout:
response = wrapped_get_topic(retry=None)
# Execute get_topic but only retry on 5xx errors:
my_retry = retry.Retry(retry.if_exception_type(
exceptions.InternalServerError))
response = wrapped_get_topic(retry=my_retry)
The way this works is by late-wrapping the given function with the retry
and timeout decorators. Essentially, when ``wrapped_get_topic()`` is
called:
* ``get_topic()`` is first wrapped with the ``timeout`` into
``get_topic_with_timeout``.
* ``get_topic_with_timeout`` is wrapped with the ``retry`` into
``get_topic_with_timeout_and_retry()``.
* The final ``get_topic_with_timeout_and_retry`` is called passing through
the ``args`` and ``kwargs``.
The callstack is therefore::
method.__call__() ->
Retry.__call__() ->
Timeout.__call__() ->
wrap_errors() ->
get_topic()
Note that if ``timeout`` or ``retry`` is ``None``, then they are not
applied to the function. For example,
``wrapped_get_topic(timeout=None, retry=None)`` is more or less
equivalent to just calling ``get_topic`` but with error re-mapping.
Args:
func (Callable[Any]): The function to wrap. It should accept an
optional ``timeout`` argument. If ``metadata`` is not ``None``, it
should accept a ``metadata`` argument.
default_retry (Optional[google.api_core.Retry]): The default retry
strategy. If ``None``, the method will not retry by default.
default_timeout (Optional[google.api_core.Timeout]): The default
timeout strategy. Can also be specified as an int or float. If
``None``, the method will not have timeout specified by default.
client_info
(Optional[google.api_core.gapic_v1.client_info.ClientInfo]):
Client information used to create a user-agent string that's
passed as gRPC metadata to the method. If unspecified, then
a sane default will be used. If ``None``, then no user agent
metadata will be provided to the RPC method.
Returns:
Callable: A new callable that takes optional ``retry`` and ``timeout``
arguments and applies the common error mapping, retry, timeout,
and metadata behavior to the low-level RPC method.
"""
func = grpc_helpers.wrap_errors(func)
if client_info is not None:
user_agent_metadata = [client_info.to_grpc_metadata()]
else:
user_agent_metadata = None
return general_helpers.wraps(func)(
_GapicCallable(
func, default_retry, default_timeout, metadata=user_agent_metadata
)
)

View file

@ -0,0 +1,45 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AsyncIO helpers for wrapping gRPC methods with common functionality.
This is used by gapic clients to provide common error mapping, retry, timeout,
pagination, and long-running operations to gRPC methods.
"""
from google.api_core import general_helpers, grpc_helpers_async
from google.api_core.gapic_v1 import client_info
from google.api_core.gapic_v1.method import (_GapicCallable, # noqa: F401
DEFAULT,
USE_DEFAULT_METADATA)
def wrap_method(
func,
default_retry=None,
default_timeout=None,
client_info=client_info.DEFAULT_CLIENT_INFO,
):
"""Wrap an async RPC method with common behavior.
Returns:
Callable: A new callable that takes optional ``retry`` and ``timeout``
arguments and applies the common error mapping, retry, timeout,
and metadata behavior to the low-level RPC method.
"""
func = grpc_helpers_async.wrap_errors(func)
metadata = [client_info.to_grpc_metadata()] if client_info is not None else None
return general_helpers.wraps(func)(_GapicCallable(
func, default_retry, default_timeout, metadata=metadata))

View file

@ -0,0 +1,62 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for constructing routing headers.
These headers are used by Google infrastructure to determine how to route
requests, especially for services that are regional.
Generally, these headers are specified as gRPC metadata.
"""
import sys
from six.moves.urllib.parse import urlencode
ROUTING_METADATA_KEY = "x-goog-request-params"
def to_routing_header(params):
"""Returns a routing header string for the given request parameters.
Args:
params (Mapping[str, Any]): A dictionary containing the request
parameters used for routing.
Returns:
str: The routing header string.
"""
if sys.version_info[0] < 3:
# Python 2 does not have the "safe" parameter for urlencode.
return urlencode(params).replace("%2F", "/")
return urlencode(
params,
# Per Google API policy (go/api-url-encoding), / is not encoded.
safe="/",
)
def to_grpc_metadata(params):
"""Returns the gRPC metadata containing the routing headers for the given
request parameters.
Args:
params (Mapping[str, Any]): A dictionary containing the request
parameters used for routing.
Returns:
Tuple(str, str): The gRPC metadata containing the routing header key
and value.
"""
return (ROUTING_METADATA_KEY, to_routing_header(params))
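# Illustrative sketch: request parameters are URL-encoded with "/" kept
# intact, then attached as gRPC metadata; the parameter name is hypothetical.
_example = to_grpc_metadata({"table_name": "projects/p1/instances/i1"})
# -> ("x-goog-request-params", "table_name=projects/p1/instances/i1")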

View file

@ -0,0 +1,33 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for general Python functionality."""
import functools
import six
# functools.partial objects lack several attributes present on real function
# objects. In Python 2 wraps fails on this so use a restricted set instead.
_PARTIAL_VALID_ASSIGNMENTS = ("__doc__",)
def wraps(wrapped):
"""A functools.wraps helper that handles partial objects on Python 2."""
# https://github.com/google/pytype/issues/322
if isinstance(wrapped, functools.partial): # pytype: disable=wrong-arg-types
return six.wraps(wrapped, assigned=_PARTIAL_VALID_ASSIGNMENTS)
else:
return six.wraps(wrapped)
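# Illustrative sketch: decorating with a functools.partial object, which
# lacks function attributes such as ``__name__`` and would otherwise trip
# functools.wraps on Python 2.
def _example_add(a, b):
    return a + b
@wraps(functools.partial(_example_add, 1))
def _example_increment(b):
    return _example_add(1, b)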

View file

@ -0,0 +1,466 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for :mod:`grpc`."""
import collections
import grpc
import six
from google.api_core import exceptions
from google.api_core import general_helpers
import google.auth
import google.auth.credentials
import google.auth.transport.grpc
import google.auth.transport.requests
try:
import grpc_gcp
HAS_GRPC_GCP = True
except ImportError:
HAS_GRPC_GCP = False
# The list of gRPC Callable interfaces that return iterators.
_STREAM_WRAP_CLASSES = (grpc.UnaryStreamMultiCallable, grpc.StreamStreamMultiCallable)
def _patch_callable_name(callable_):
"""Fix-up gRPC callable attributes.
    gRPC callables lack the ``__name__`` attribute, which causes
:func:`functools.wraps` to error. This adds the attribute if needed.
"""
if not hasattr(callable_, "__name__"):
callable_.__name__ = callable_.__class__.__name__
def _wrap_unary_errors(callable_):
"""Map errors for Unary-Unary and Stream-Unary gRPC callables."""
_patch_callable_name(callable_)
@six.wraps(callable_)
def error_remapped_callable(*args, **kwargs):
try:
return callable_(*args, **kwargs)
except grpc.RpcError as exc:
six.raise_from(exceptions.from_grpc_error(exc), exc)
return error_remapped_callable
class _StreamingResponseIterator(grpc.Call):
def __init__(self, wrapped, prefetch_first_result=True):
self._wrapped = wrapped
        # This iterator is used in a retry context and returned to the caller
        # after init. gRPC will not raise an exception until the stream is
        # consumed, so we retrieve the first result up front in order to fail
        # fast and trigger a retry.
try:
if prefetch_first_result:
self._stored_first_result = six.next(self._wrapped)
except TypeError:
# It is possible the wrapped method isn't an iterable (a grpc.Call
# for instance). If this happens don't store the first result.
pass
except StopIteration:
# ignore stop iteration at this time. This should be handled outside of retry.
pass
def __iter__(self):
"""This iterator is also an iterable that returns itself."""
return self
def next(self):
"""Get the next response from the stream.
Returns:
protobuf.Message: A single response from the stream.
"""
try:
if hasattr(self, "_stored_first_result"):
result = self._stored_first_result
del self._stored_first_result
return result
return six.next(self._wrapped)
except grpc.RpcError as exc:
# If the stream has already returned data, we cannot recover here.
six.raise_from(exceptions.from_grpc_error(exc), exc)
# Alias needed for Python 2/3 support.
__next__ = next
# grpc.Call & grpc.RpcContext interface
def add_callback(self, callback):
return self._wrapped.add_callback(callback)
def cancel(self):
return self._wrapped.cancel()
def code(self):
return self._wrapped.code()
def details(self):
return self._wrapped.details()
def initial_metadata(self):
return self._wrapped.initial_metadata()
def is_active(self):
return self._wrapped.is_active()
def time_remaining(self):
return self._wrapped.time_remaining()
def trailing_metadata(self):
return self._wrapped.trailing_metadata()
def _wrap_stream_errors(callable_):
"""Wrap errors for Unary-Stream and Stream-Stream gRPC callables.
The callables that return iterators require a bit more logic to re-map
errors when iterating. This wraps both the initial invocation and the
iterator of the return value to re-map errors.
"""
_patch_callable_name(callable_)
@general_helpers.wraps(callable_)
def error_remapped_callable(*args, **kwargs):
try:
result = callable_(*args, **kwargs)
# Auto-fetching the first result causes PubSub client's streaming pull
            # to hang when re-opening the stream, thus we need to examine the hacky
# hidden flag to see if pre-fetching is disabled.
# https://github.com/googleapis/python-pubsub/issues/93#issuecomment-630762257
prefetch_first = getattr(callable_, "_prefetch_first_result_", True)
return _StreamingResponseIterator(result, prefetch_first_result=prefetch_first)
except grpc.RpcError as exc:
six.raise_from(exceptions.from_grpc_error(exc), exc)
return error_remapped_callable
def wrap_errors(callable_):
"""Wrap a gRPC callable and map :class:`grpc.RpcErrors` to friendly error
classes.
Errors raised by the gRPC callable are mapped to the appropriate
:class:`google.api_core.exceptions.GoogleAPICallError` subclasses.
The original `grpc.RpcError` (which is usually also a `grpc.Call`) is
available from the ``response`` property on the mapped exception. This
is useful for extracting metadata from the original error.
Args:
callable_ (Callable): A gRPC callable.
Returns:
Callable: The wrapped gRPC callable.
"""
if isinstance(callable_, _STREAM_WRAP_CLASSES):
return _wrap_stream_errors(callable_)
else:
return _wrap_unary_errors(callable_)
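# Illustrative sketch: after wrapping, callers catch rich exception classes
# instead of inspecting raw grpc.RpcError codes. ``stub.GetTopic`` is a
# hypothetical unary-unary callable.
def _example_get_topic(stub, request):
    get_topic = wrap_errors(stub.GetTopic)
    try:
        return get_topic(request)
    except exceptions.NotFound:
        # Mapped from grpc.StatusCode.NOT_FOUND by the error remapping above.
        return None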
def _create_composite_credentials(
credentials=None,
credentials_file=None,
scopes=None,
ssl_credentials=None,
quota_project_id=None):
"""Create the composite credentials for secure channels.
Args:
credentials (google.auth.credentials.Credentials): The credentials. If
not specified, then this function will attempt to ascertain the
credentials from the environment using :func:`google.auth.default`.
credentials_file (str): A file with credentials that can be loaded with
:func:`google.auth.load_credentials_from_file`. This argument is
mutually exclusive with credentials.
        scopes (Sequence[str]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
credentials. This can be used to specify different certificates.
quota_project_id (str): An optional project to use for billing and quota.
Returns:
grpc.ChannelCredentials: The composed channel credentials object.
Raises:
google.api_core.DuplicateCredentialArgs: If both a credentials object and credentials_file are passed.
"""
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials' and 'credentials_file' are mutually exclusive."
)
if credentials_file:
credentials, _ = google.auth.load_credentials_from_file(credentials_file, scopes=scopes)
elif credentials:
credentials = google.auth.credentials.with_scopes_if_required(credentials, scopes)
else:
credentials, _ = google.auth.default(scopes=scopes)
if quota_project_id and isinstance(credentials, google.auth.credentials.CredentialsWithQuotaProject):
credentials = credentials.with_quota_project(quota_project_id)
request = google.auth.transport.requests.Request()
# Create the metadata plugin for inserting the authorization header.
metadata_plugin = google.auth.transport.grpc.AuthMetadataPlugin(
credentials, request
)
# Create a set of grpc.CallCredentials using the metadata plugin.
google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)
if ssl_credentials is None:
ssl_credentials = grpc.ssl_channel_credentials()
# Combine the ssl credentials and the authorization credentials.
return grpc.composite_channel_credentials(
ssl_credentials, google_auth_credentials
)
def create_channel(
target,
credentials=None,
scopes=None,
ssl_credentials=None,
credentials_file=None,
quota_project_id=None,
**kwargs):
"""Create a secure channel with credentials.
Args:
target (str): The target service address in the format 'hostname:port'.
credentials (google.auth.credentials.Credentials): The credentials. If
not specified, then this function will attempt to ascertain the
credentials from the environment using :func:`google.auth.default`.
        scopes (Sequence[str]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
credentials. This can be used to specify different certificates.
credentials_file (str): A file with credentials that can be loaded with
:func:`google.auth.load_credentials_from_file`. This argument is
mutually exclusive with credentials.
quota_project_id (str): An optional project to use for billing and quota.
kwargs: Additional key-word args passed to
:func:`grpc_gcp.secure_channel` or :func:`grpc.secure_channel`.
Returns:
grpc.Channel: The created channel.
Raises:
google.api_core.DuplicateCredentialArgs: If both a credentials object and credentials_file are passed.
"""
composite_credentials = _create_composite_credentials(
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
ssl_credentials=ssl_credentials,
quota_project_id=quota_project_id,
)
if HAS_GRPC_GCP:
# If grpc_gcp module is available use grpc_gcp.secure_channel,
# otherwise, use grpc.secure_channel to create grpc channel.
return grpc_gcp.secure_channel(target, composite_credentials, **kwargs)
else:
return grpc.secure_channel(target, composite_credentials, **kwargs)
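# Illustrative sketch: creating a channel with application default
# credentials; the target and scope shown are hypothetical.
def _example_create_channel():
    return create_channel(
        "pubsub.googleapis.com:443",
        scopes=["https://www.googleapis.com/auth/pubsub"],
    )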
_MethodCall = collections.namedtuple(
"_MethodCall", ("request", "timeout", "metadata", "credentials")
)
_ChannelRequest = collections.namedtuple("_ChannelRequest", ("method", "request"))
class _CallableStub(object):
"""Stub for the grpc.*MultiCallable interfaces."""
def __init__(self, method, channel):
self._method = method
self._channel = channel
self.response = None
"""Union[protobuf.Message, Callable[protobuf.Message], exception]:
The response to give when invoking this callable. If this is a
callable, it will be invoked with the request protobuf. If it's an
exception, the exception will be raised when this is invoked.
"""
self.responses = None
"""Iterator[
Union[protobuf.Message, Callable[protobuf.Message], exception]]:
An iterator of responses. If specified, self.response will be populated
on each invocation by calling ``next(self.responses)``."""
self.requests = []
"""List[protobuf.Message]: All requests sent to this callable."""
self.calls = []
"""List[Tuple]: All invocations of this callable. Each tuple is the
request, timeout, metadata, and credentials."""
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self._channel.requests.append(_ChannelRequest(self._method, request))
self.calls.append(_MethodCall(request, timeout, metadata, credentials))
self.requests.append(request)
response = self.response
if self.responses is not None:
if response is None:
response = next(self.responses)
else:
raise ValueError(
"{method}.response and {method}.responses are mutually "
"exclusive.".format(method=self._method)
)
if callable(response):
return response(request)
if isinstance(response, Exception):
raise response
if response is not None:
return response
raise ValueError('Method stub for "{}" has no response.'.format(self._method))
def _simplify_method_name(method):
"""Simplifies a gRPC method name.
When gRPC invokes the channel to create a callable, it gives a full
method name like "/google.pubsub.v1.Publisher/CreateTopic". This
returns just the name of the method, in this case "CreateTopic".
Args:
method (str): The name of the method.
Returns:
str: The simplified name of the method.
"""
return method.rsplit("/", 1).pop()
class ChannelStub(grpc.Channel):
"""A testing stub for the grpc.Channel interface.
This can be used to test any client that eventually uses a gRPC channel
to communicate. By passing in a channel stub, you can configure which
responses are returned and track which requests are made.
For example:
.. code-block:: python
channel_stub = grpc_helpers.ChannelStub()
client = FooClient(channel=channel_stub)
channel_stub.GetFoo.response = foo_pb2.Foo(name='bar')
foo = client.get_foo(labels=['baz'])
assert foo.name == 'bar'
        assert channel_stub.GetFoo.requests[0].labels == ['baz']
Each method on the stub can be accessed and configured on the channel.
Here's some examples of various configurations:
.. code-block:: python
# Return a basic response:
channel_stub.GetFoo.response = foo_pb2.Foo(name='bar')
assert client.get_foo().name == 'bar'
# Raise an exception:
channel_stub.GetFoo.response = NotFound('...')
with pytest.raises(NotFound):
client.get_foo()
# Use a sequence of responses:
channel_stub.GetFoo.responses = iter([
foo_pb2.Foo(name='bar'),
foo_pb2.Foo(name='baz'),
])
assert client.get_foo().name == 'bar'
assert client.get_foo().name == 'baz'
# Use a callable
def on_get_foo(request):
return foo_pb2.Foo(name='bar' + request.id)
channel_stub.GetFoo.response = on_get_foo
assert client.get_foo(id='123').name == 'bar123'
"""
def __init__(self, responses=[]):
self.requests = []
"""Sequence[Tuple[str, protobuf.Message]]: A list of all requests made
on this channel in order. The tuple is of method name, request
message."""
self._method_stubs = {}
def _stub_for_method(self, method):
method = _simplify_method_name(method)
self._method_stubs[method] = _CallableStub(method, self)
return self._method_stubs[method]
def __getattr__(self, key):
try:
return self._method_stubs[key]
except KeyError:
raise AttributeError
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
"""grpc.Channel.unary_unary implementation."""
return self._stub_for_method(method)
def unary_stream(self, method, request_serializer=None, response_deserializer=None):
"""grpc.Channel.unary_stream implementation."""
return self._stub_for_method(method)
def stream_unary(self, method, request_serializer=None, response_deserializer=None):
"""grpc.Channel.stream_unary implementation."""
return self._stub_for_method(method)
def stream_stream(
self, method, request_serializer=None, response_deserializer=None
):
"""grpc.Channel.stream_stream implementation."""
return self._stub_for_method(method)
def subscribe(self, callback, try_to_connect=False):
"""grpc.Channel.subscribe implementation."""
pass
def unsubscribe(self, callback):
"""grpc.Channel.unsubscribe implementation."""
pass
def close(self):
"""grpc.Channel.close implementation."""
pass

View file

@ -0,0 +1,289 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AsyncIO helpers for :mod:`grpc` supporting 3.6+.
Please refer to the more detailed docstrings in grpc_helpers.py when using
these functions. This module implements the same surface with AsyncIO
semantics.
"""
import asyncio
import functools
import grpc
from grpc.experimental import aio
from google.api_core import exceptions, grpc_helpers
# TODO(lidiz) Support gRPC GCP wrapper
HAS_GRPC_GCP = False
# NOTE(lidiz) Alternatively, we can hack "__getattribute__" to perform
# automatic patching for us. But that means the overhead of creating an
# extra Python function spreads to every single send and receive.
class _WrappedCall(aio.Call):
def __init__(self):
self._call = None
def with_call(self, call):
"""Supplies the call object separately to keep __init__ clean."""
self._call = call
return self
async def initial_metadata(self):
return await self._call.initial_metadata()
async def trailing_metadata(self):
return await self._call.trailing_metadata()
async def code(self):
return await self._call.code()
async def details(self):
return await self._call.details()
def cancelled(self):
return self._call.cancelled()
def done(self):
return self._call.done()
def time_remaining(self):
return self._call.time_remaining()
def cancel(self):
return self._call.cancel()
def add_done_callback(self, callback):
self._call.add_done_callback(callback)
async def wait_for_connection(self):
try:
await self._call.wait_for_connection()
except grpc.RpcError as rpc_error:
raise exceptions.from_grpc_error(rpc_error) from rpc_error
class _WrappedUnaryResponseMixin(_WrappedCall):
def __await__(self):
try:
response = yield from self._call.__await__()
return response
except grpc.RpcError as rpc_error:
raise exceptions.from_grpc_error(rpc_error) from rpc_error
class _WrappedStreamResponseMixin(_WrappedCall):
def __init__(self):
self._wrapped_async_generator = None
async def read(self):
try:
return await self._call.read()
except grpc.RpcError as rpc_error:
raise exceptions.from_grpc_error(rpc_error) from rpc_error
async def _wrapped_aiter(self):
try:
# NOTE(lidiz) coverage doesn't understand the exception raised from
# __anext__ method. It is covered by test case:
# test_wrap_stream_errors_aiter_non_rpc_error
async for response in self._call: # pragma: no branch
yield response
except grpc.RpcError as rpc_error:
raise exceptions.from_grpc_error(rpc_error) from rpc_error
def __aiter__(self):
if not self._wrapped_async_generator:
self._wrapped_async_generator = self._wrapped_aiter()
return self._wrapped_async_generator
class _WrappedStreamRequestMixin(_WrappedCall):
async def write(self, request):
try:
await self._call.write(request)
except grpc.RpcError as rpc_error:
raise exceptions.from_grpc_error(rpc_error) from rpc_error
async def done_writing(self):
try:
await self._call.done_writing()
except grpc.RpcError as rpc_error:
raise exceptions.from_grpc_error(rpc_error) from rpc_error
# NOTE(lidiz) Implementing each individual class separately, so we don't
# expose any API that should not be seen. E.g., __aiter__ in unary-unary
# RPC, or __await__ in stream-stream RPC.
class _WrappedUnaryUnaryCall(_WrappedUnaryResponseMixin, aio.UnaryUnaryCall):
"""Wrapped UnaryUnaryCall to map exceptions."""
class _WrappedUnaryStreamCall(_WrappedStreamResponseMixin, aio.UnaryStreamCall):
"""Wrapped UnaryStreamCall to map exceptions."""
class _WrappedStreamUnaryCall(_WrappedUnaryResponseMixin, _WrappedStreamRequestMixin, aio.StreamUnaryCall):
"""Wrapped StreamUnaryCall to map exceptions."""
class _WrappedStreamStreamCall(_WrappedStreamRequestMixin, _WrappedStreamResponseMixin, aio.StreamStreamCall):
"""Wrapped StreamStreamCall to map exceptions."""
def _wrap_unary_errors(callable_):
"""Map errors for Unary-Unary async callables."""
grpc_helpers._patch_callable_name(callable_)
@functools.wraps(callable_)
def error_remapped_callable(*args, **kwargs):
call = callable_(*args, **kwargs)
return _WrappedUnaryUnaryCall().with_call(call)
return error_remapped_callable
def _wrap_stream_errors(callable_):
"""Map errors for streaming RPC async callables."""
grpc_helpers._patch_callable_name(callable_)
@functools.wraps(callable_)
async def error_remapped_callable(*args, **kwargs):
call = callable_(*args, **kwargs)
if isinstance(call, aio.UnaryStreamCall):
call = _WrappedUnaryStreamCall().with_call(call)
elif isinstance(call, aio.StreamUnaryCall):
call = _WrappedStreamUnaryCall().with_call(call)
elif isinstance(call, aio.StreamStreamCall):
call = _WrappedStreamStreamCall().with_call(call)
else:
raise TypeError('Unexpected type of call %s' % type(call))
await call.wait_for_connection()
return call
return error_remapped_callable
def wrap_errors(callable_):
"""Wrap a gRPC async callable and map :class:`grpc.RpcErrors` to
friendly error classes.
Errors raised by the gRPC callable are mapped to the appropriate
:class:`google.api_core.exceptions.GoogleAPICallError` subclasses. The
original `grpc.RpcError` (which is usually also a `grpc.Call`) is
available from the ``response`` property on the mapped exception. This
is useful for extracting metadata from the original error.
Args:
callable_ (Callable): A gRPC callable.
    Returns:
        Callable: The wrapped gRPC callable.
"""
if isinstance(callable_, aio.UnaryUnaryMultiCallable):
return _wrap_unary_errors(callable_)
else:
return _wrap_stream_errors(callable_)
def create_channel(
target,
credentials=None,
scopes=None,
ssl_credentials=None,
credentials_file=None,
quota_project_id=None,
**kwargs):
"""Create an AsyncIO secure channel with credentials.
Args:
target (str): The target service address in the format 'hostname:port'.
credentials (google.auth.credentials.Credentials): The credentials. If
not specified, then this function will attempt to ascertain the
credentials from the environment using :func:`google.auth.default`.
        scopes (Sequence[str]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
ssl_credentials (grpc.ChannelCredentials): Optional SSL channel
credentials. This can be used to specify different certificates.
credentials_file (str): A file with credentials that can be loaded with
:func:`google.auth.load_credentials_from_file`. This argument is
mutually exclusive with credentials.
quota_project_id (str): An optional project to use for billing and quota.
kwargs: Additional key-word args passed to :func:`aio.secure_channel`.
Returns:
aio.Channel: The created channel.
Raises:
google.api_core.DuplicateCredentialArgs: If both a credentials object and credentials_file are passed.
"""
composite_credentials = grpc_helpers._create_composite_credentials(
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
ssl_credentials=ssl_credentials,
quota_project_id=quota_project_id,
)
return aio.secure_channel(target, composite_credentials, **kwargs)
class FakeUnaryUnaryCall(_WrappedUnaryUnaryCall):
"""Fake implementation for unary-unary RPCs.
    A dummy object for the response message. Supply the intended response at
    initialization, and awaiting the call will return exactly that response.
"""
def __init__(self, response=object()):
self.response = response
self._future = asyncio.get_event_loop().create_future()
self._future.set_result(self.response)
def __await__(self):
response = yield from self._future.__await__()
return response
class FakeStreamUnaryCall(_WrappedStreamUnaryCall):
"""Fake implementation for stream-unary RPCs.
    A dummy object for the response message. Supply the intended response at
    initialization, and awaiting the call will return exactly that response.
"""
def __init__(self, response=object()):
self.response = response
self._future = asyncio.get_event_loop().create_future()
self._future.set_result(self.response)
def __await__(self):
response = yield from self._future.__await__()
return response
async def wait_for_connection(self):
pass
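# Illustrative sketch: the fake call classes above let tests stub async RPCs
# without a real channel.
async def _example_fake_call():
    fake = FakeUnaryUnaryCall(response="hello")
    assert await fake == "hello"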

View file

@ -0,0 +1,460 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Non-API-specific IAM policy definitions
For allowed roles / permissions, see:
https://cloud.google.com/iam/docs/understanding-roles
Example usage:
.. code-block:: python
# ``get_iam_policy`` returns a :class:'~google.api_core.iam.Policy`.
policy = resource.get_iam_policy(requested_policy_version=3)
phred = "user:phred@example.com"
admin_group = "group:admins@groups.example.com"
account = "serviceAccount:account-1234@accounts.example.com"
policy.version = 3
policy.bindings = [
{
"role": "roles/owner",
"members": {phred, admin_group, account}
},
{
"role": "roles/editor",
"members": {"allAuthenticatedUsers"}
},
{
"role": "roles/viewer",
"members": {"allUsers"}
"condition": {
"title": "request_time",
"description": "Requests made before 2021-01-01T00:00:00Z",
"expression": "request.time < timestamp(\"2021-01-01T00:00:00Z\")"
}
}
]
resource.set_iam_policy(policy)
"""
import collections
import operator
import warnings
try:
from collections import abc as collections_abc
except ImportError: # Python 2.7
import collections as collections_abc
# Generic IAM roles
OWNER_ROLE = "roles/owner"
"""Generic role implying all rights to an object."""
EDITOR_ROLE = "roles/editor"
"""Generic role implying rights to modify an object."""
VIEWER_ROLE = "roles/viewer"
"""Generic role implying rights to access an object."""
_ASSIGNMENT_DEPRECATED_MSG = """\
Assigning to '{}' is deprecated. Use the `policy.bindings` property to modify bindings instead."""
_FACTORY_DEPRECATED_MSG = """\
Factory method {0} is deprecated. Replace with '{0}'."""
_DICT_ACCESS_MSG = """\
Dict access is not supported on policies with version > 1 or with conditional bindings."""
class InvalidOperationException(Exception):
"""Raised when trying to use Policy class as a dict."""
pass
class Policy(collections_abc.MutableMapping):
"""IAM Policy
Args:
        etag (Optional[str]): ETag used to identify a unique version of the policy.
version (Optional[int]): The syntax schema version of the policy.
Note:
Using conditions in bindings requires the policy's version to be set
to `3` or greater, depending on the versions that are currently supported.
Accessing the policy using dict operations will raise InvalidOperationException
when the policy's version is set to 3.
Use the policy.bindings getter/setter to retrieve and modify the policy's bindings.
See:
IAM Policy https://cloud.google.com/iam/reference/rest/v1/Policy
Policy versions https://cloud.google.com/iam/docs/policies#versions
Conditions overview https://cloud.google.com/iam/docs/conditions-overview.
"""
_OWNER_ROLES = (OWNER_ROLE,)
"""Roles mapped onto our ``owners`` attribute."""
_EDITOR_ROLES = (EDITOR_ROLE,)
"""Roles mapped onto our ``editors`` attribute."""
_VIEWER_ROLES = (VIEWER_ROLE,)
"""Roles mapped onto our ``viewers`` attribute."""
def __init__(self, etag=None, version=None):
self.etag = etag
self.version = version
self._bindings = []
def __iter__(self):
self.__check_version__()
return (binding["role"] for binding in self._bindings)
def __len__(self):
self.__check_version__()
return len(self._bindings)
def __getitem__(self, key):
self.__check_version__()
for b in self._bindings:
if b["role"] == key:
return b["members"]
return set()
def __setitem__(self, key, value):
self.__check_version__()
value = set(value)
for binding in self._bindings:
if binding["role"] == key:
binding["members"] = value
return
self._bindings.append({"role": key, "members": value})
def __delitem__(self, key):
self.__check_version__()
for b in self._bindings:
if b["role"] == key:
self._bindings.remove(b)
return
raise KeyError(key)
def __check_version__(self):
"""Raise InvalidOperationException if version is greater than 1 or policy contains conditions."""
raise_version = self.version is not None and self.version > 1
if raise_version or self._contains_conditions():
raise InvalidOperationException(_DICT_ACCESS_MSG)
def _contains_conditions(self):
for b in self._bindings:
if b.get("condition") is not None:
return True
return False
@property
def bindings(self):
"""The policy's list of bindings.
A binding is specified by a dictionary with keys:
* role (str): Role that is assigned to `members`.
* members (:obj:`set` of str): Specifies the identities associated to this binding.
* condition (:obj:`dict` of str:str): Specifies a condition under which this binding will apply.
* title (str): Title for the condition.
* description (:obj:str, optional): Description of the condition.
* expression: A CEL expression.
Type:
:obj:`list` of :obj:`dict`
See:
Policy versions https://cloud.google.com/iam/docs/policies#versions
Conditions overview https://cloud.google.com/iam/docs/conditions-overview.
Example:
.. code-block:: python
USER = "user:phred@example.com"
ADMIN_GROUP = "group:admins@groups.example.com"
SERVICE_ACCOUNT = "serviceAccount:account-1234@accounts.example.com"
CONDITION = {
"title": "request_time",
"description": "Requests made before 2021-01-01T00:00:00Z", # Optional
"expression": "request.time < timestamp(\"2021-01-01T00:00:00Z\")"
}
# Set policy's version to 3 before setting bindings containing conditions.
policy.version = 3
policy.bindings = [
{
"role": "roles/viewer",
"members": {USER, ADMIN_GROUP, SERVICE_ACCOUNT},
"condition": CONDITION
},
...
]
"""
return self._bindings
@bindings.setter
def bindings(self, bindings):
self._bindings = bindings
@property
def owners(self):
"""Legacy access to owner role.
Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
DEPRECATED: use `policy.bindings` to access bindings instead.
"""
result = set()
for role in self._OWNER_ROLES:
for member in self.get(role, ()):
result.add(member)
return frozenset(result)
@owners.setter
def owners(self, value):
"""Update owners.
Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
DEPRECATED: use `policy.bindings` to access bindings instead.
"""
warnings.warn(
_ASSIGNMENT_DEPRECATED_MSG.format("owners", OWNER_ROLE), DeprecationWarning
)
self[OWNER_ROLE] = value
@property
def editors(self):
"""Legacy access to editor role.
Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
DEPRECATED: use `policy.bindings` to access bindings instead.
"""
result = set()
for role in self._EDITOR_ROLES:
for member in self.get(role, ()):
result.add(member)
return frozenset(result)
@editors.setter
def editors(self, value):
"""Update editors.
Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
DEPRECATED: use `policy.bindings` to modify bindings instead.
"""
warnings.warn(
_ASSIGNMENT_DEPRECATED_MSG.format("editors", EDITOR_ROLE),
DeprecationWarning,
)
self[EDITOR_ROLE] = value
@property
def viewers(self):
"""Legacy access to viewer role.
Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
DEPRECATED: use `policy.bindings` to modify bindings instead.
"""
result = set()
for role in self._VIEWER_ROLES:
for member in self.get(role, ()):
result.add(member)
return frozenset(result)
@viewers.setter
def viewers(self, value):
"""Update viewers.
Raise InvalidOperationException if version is greater than 1 or policy contains conditions.
DEPRECATED: use `policy.bindings` to modify bindings instead.
"""
warnings.warn(
_ASSIGNMENT_DEPRECATED_MSG.format("viewers", VIEWER_ROLE),
DeprecationWarning,
)
self[VIEWER_ROLE] = value
@staticmethod
def user(email):
"""Factory method for a user member.
Args:
email (str): E-mail for this particular user.
Returns:
str: A member string corresponding to the given user.
DEPRECATED: add the member `user:{email}` to the binding instead.
"""
warnings.warn(
_FACTORY_DEPRECATED_MSG.format("user:{email}"), DeprecationWarning,
)
return "user:%s" % (email,)
@staticmethod
def service_account(email):
"""Factory method for a service account member.
Args:
email (str): E-mail for this particular service account.
Returns:
str: A member string corresponding to the given service account.
DEPRECATED: add the member `serviceAccount:{email}` to the binding instead.
"""
warnings.warn(
_FACTORY_DEPRECATED_MSG.format("serviceAccount:{email}"),
DeprecationWarning,
)
return "serviceAccount:%s" % (email,)
@staticmethod
def group(email):
"""Factory method for a group member.
Args:
email (str): An id or e-mail for this particular group.
Returns:
str: A member string corresponding to the given group.
DEPRECATED: add the member `group:{email}` to the binding instead.
"""
warnings.warn(
_FACTORY_DEPRECATED_MSG.format("group:{email}"), DeprecationWarning,
)
return "group:%s" % (email,)
@staticmethod
def domain(domain):
"""Factory method for a domain member.
Args:
domain (str): The domain for this member.
Returns:
str: A member string corresponding to the given domain.
DEPRECATED: add the member `domain:{domain}` to the binding instead.
"""
warnings.warn(
_FACTORY_DEPRECATED_MSG.format("domain:{email}"), DeprecationWarning,
)
return "domain:%s" % (domain,)
@staticmethod
def all_users():
"""Factory method for a member representing all users.
Returns:
str: A member string representing all users.
DEPRECATED: use the member `allUsers` in the binding instead.
"""
warnings.warn(
_FACTORY_DEPRECATED_MSG.format("allUsers"), DeprecationWarning,
)
return "allUsers"
@staticmethod
def authenticated_users():
"""Factory method for a member representing all authenticated users.
Returns:
str: A member string representing all authenticated users.
DEPRECATED: use the member `allAuthenticatedUsers` in the binding instead.
"""
warnings.warn(
_FACTORY_DEPRECATED_MSG.format("allAuthenticatedUsers"), DeprecationWarning,
)
return "allAuthenticatedUsers"
@classmethod
def from_api_repr(cls, resource):
"""Factory: create a policy from a JSON resource.
Args:
resource (dict): policy resource returned by ``getIamPolicy`` API.
Returns:
:class:`Policy`: the parsed policy
"""
version = resource.get("version")
etag = resource.get("etag")
policy = cls(etag, version)
policy.bindings = resource.get("bindings", [])
for binding in policy.bindings:
binding["members"] = set(binding.get("members", ()))
return policy
def to_api_repr(self):
"""Render a JSON policy resource.
Returns:
dict: a resource to be passed to the ``setIamPolicy`` API.
"""
resource = {}
if self.etag is not None:
resource["etag"] = self.etag
if self.version is not None:
resource["version"] = self.version
if self._bindings:
bindings = []
for binding in self._bindings:
members = binding.get("members")
if members:
new_binding = {
"role": binding["role"],
"members": sorted(members)
}
condition = binding.get("condition")
if condition:
new_binding["condition"] = condition
bindings.append(new_binding)
if bindings:
# Sort bindings by role
key = operator.itemgetter("role")
resource["bindings"] = sorted(bindings, key=key)
return resource
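
# --- Usage sketch (illustrative, not part of the module): a minimal
# round-trip through ``from_api_repr``/``to_api_repr``. The resource dict
# below is a hypothetical ``getIamPolicy`` response invented for the example.
if __name__ == "__main__":  # pragma: no cover
    resource = {
        "etag": "BwWWja0YfJA=",  # hypothetical etag
        "version": 1,
        "bindings": [
            {"role": "roles/viewer", "members": ["user:phred@example.com"]}
        ],
    }
    policy = Policy.from_api_repr(resource)
    # ``from_api_repr`` parses each ``members`` list into a set, so the
    # role lookup below returns a mutable set of member strings.
    policy["roles/viewer"].add("group:admins@groups.example.com")
    print(policy.to_api_repr())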

View file

@ -0,0 +1,327 @@
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Futures for long-running operations returned from Google Cloud APIs.
These futures can be used to synchronously wait for the result of a
long-running operation using :meth:`Operation.result`:
.. code-block:: python
operation = my_api_client.long_running_method()
result = operation.result()
Or asynchronously using callbacks and :meth:`Operation.add_done_callback`:
.. code-block:: python
operation = my_api_client.long_running_method()
def my_callback(future):
result = future.result()
operation.add_done_callback(my_callback)
"""
import functools
import threading
from google.api_core import exceptions
from google.api_core import protobuf_helpers
from google.api_core.future import polling
from google.longrunning import operations_pb2
from google.protobuf import json_format
from google.rpc import code_pb2
class Operation(polling.PollingFuture):
"""A Future for interacting with a Google API Long-Running Operation.
Args:
operation (google.longrunning.operations_pb2.Operation): The
initial operation.
refresh (Callable[[], ~.api_core.operation.Operation]): A callable that
returns the latest state of the operation.
cancel (Callable[[], None]): A callable that tries to cancel
the operation.
result_type (:func:`type`): The protobuf type for the operation's
result.
metadata_type (:func:`type`): The protobuf type for the operation's
metadata.
retry (google.api_core.retry.Retry): The retry configuration used
when polling. This can be used to control how often :meth:`done`
is polled. Regardless of the retry's ``deadline``, it will be
overridden by the ``timeout`` argument to :meth:`result`.
"""
def __init__(
self,
operation,
refresh,
cancel,
result_type,
metadata_type=None,
retry=polling.DEFAULT_RETRY,
):
super(Operation, self).__init__(retry=retry)
self._operation = operation
self._refresh = refresh
self._cancel = cancel
self._result_type = result_type
self._metadata_type = metadata_type
self._completion_lock = threading.Lock()
# Invoke this in case the operation came back already complete.
self._set_result_from_operation()
@property
def operation(self):
"""google.longrunning.Operation: The current long-running operation."""
return self._operation
@property
def metadata(self):
"""google.protobuf.Message: the current operation metadata."""
if not self._operation.HasField("metadata"):
return None
return protobuf_helpers.from_any_pb(
self._metadata_type, self._operation.metadata
)
@classmethod
def deserialize(cls, payload):
"""Deserialize a ``google.longrunning.Operation`` protocol buffer.
Args:
payload (bytes): A serialized operation protocol buffer.
Returns:
~.operations_pb2.Operation: An Operation protobuf object.
"""
return operations_pb2.Operation.FromString(payload)
def _set_result_from_operation(self):
"""Set the result or exception from the operation if it is complete."""
# This must be done in a lock to prevent the polling thread
# and main thread from both executing the completion logic
# at the same time.
with self._completion_lock:
# If the operation isn't complete or if the result has already been
# set, do not call set_result/set_exception again.
# Note: self._result_set is set to True in set_result and
# set_exception, in case those methods are invoked directly.
if not self._operation.done or self._result_set:
return
if self._operation.HasField("response"):
response = protobuf_helpers.from_any_pb(
self._result_type, self._operation.response
)
self.set_result(response)
elif self._operation.HasField("error"):
exception = exceptions.GoogleAPICallError(
self._operation.error.message,
errors=(self._operation.error,),
response=self._operation,
)
self.set_exception(exception)
else:
exception = exceptions.GoogleAPICallError(
"Unexpected state: Long-running operation had neither "
"response nor error set."
)
self.set_exception(exception)
def _refresh_and_update(self, retry=polling.DEFAULT_RETRY):
"""Refresh the operation and update the result if needed.
Args:
retry (google.api_core.retry.Retry): (Optional) How to retry the RPC.
"""
# If the currently cached operation is done, no need to make another
# RPC as it will not change once done.
if not self._operation.done:
self._operation = self._refresh(retry=retry)
self._set_result_from_operation()
def done(self, retry=polling.DEFAULT_RETRY):
"""Checks to see if the operation is complete.
Args:
retry (google.api_core.retry.Retry): (Optional) How to retry the RPC.
Returns:
bool: True if the operation is complete, False otherwise.
"""
self._refresh_and_update(retry)
return self._operation.done
def cancel(self):
"""Attempt to cancel the operation.
Returns:
bool: True if the cancel RPC was made, False if the operation is
already complete.
"""
if self.done():
return False
self._cancel()
return True
def cancelled(self):
"""True if the operation was cancelled."""
self._refresh_and_update()
return (
self._operation.HasField("error")
and self._operation.error.code == code_pb2.CANCELLED
)
def _refresh_http(api_request, operation_name):
"""Refresh an operation using a JSON/HTTP client.
Args:
api_request (Callable): A callable used to make an API request. This
should generally be
:meth:`google.cloud._http.Connection.api_request`.
operation_name (str): The name of the operation.
Returns:
google.longrunning.operations_pb2.Operation: The operation.
"""
path = "operations/{}".format(operation_name)
api_response = api_request(method="GET", path=path)
return json_format.ParseDict(api_response, operations_pb2.Operation())
def _cancel_http(api_request, operation_name):
"""Cancel an operation using a JSON/HTTP client.
Args:
api_request (Callable): A callable used to make an API request. This
should generally be
:meth:`google.cloud._http.Connection.api_request`.
operation_name (str): The name of the operation.
"""
path = "operations/{}:cancel".format(operation_name)
api_request(method="POST", path=path)
def from_http_json(operation, api_request, result_type, **kwargs):
"""Create an operation future using a HTTP/JSON client.
This interacts with the long-running operations `service`_ (specific
to a given API) via `HTTP/JSON`_.
.. _HTTP/JSON: https://cloud.google.com/speech/reference/rest/\
v1beta1/operations#Operation
Args:
operation (dict): Operation as a dictionary.
api_request (Callable): A callable used to make an API request. This
should generally be
:meth:`google.cloud._http.Connection.api_request`.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
operation_proto = json_format.ParseDict(operation, operations_pb2.Operation())
refresh = functools.partial(_refresh_http, api_request, operation_proto.name)
cancel = functools.partial(_cancel_http, api_request, operation_proto.name)
return Operation(operation_proto, refresh, cancel, result_type, **kwargs)
def _refresh_grpc(operations_stub, operation_name):
"""Refresh an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
Returns:
google.longrunning.operations_pb2.Operation: The operation.
"""
request_pb = operations_pb2.GetOperationRequest(name=operation_name)
return operations_stub.GetOperation(request_pb)
def _cancel_grpc(operations_stub, operation_name):
"""Cancel an operation using a gRPC client.
Args:
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The gRPC operations stub.
operation_name (str): The name of the operation.
"""
request_pb = operations_pb2.CancelOperationRequest(name=operation_name)
operations_stub.CancelOperation(request_pb)
def from_grpc(operation, operations_stub, result_type, **kwargs):
"""Create an operation future using a gRPC client.
This interacts with the long-running operations `service`_ (specific
to a given API) via gRPC.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_stub (google.longrunning.operations_pb2.OperationsStub):
The operations stub.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
refresh = functools.partial(_refresh_grpc, operations_stub, operation.name)
cancel = functools.partial(_cancel_grpc, operations_stub, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs)
def from_gapic(operation, operations_client, result_type, **kwargs):
"""Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
refresh = functools.partial(operations_client.get_operation, operation.name)
cancel = functools.partial(operations_client.cancel_operation, operation.name)
return Operation(operation, refresh, cancel, result_type, **kwargs)
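
# --- Usage sketch (illustrative): wiring ``from_grpc`` by hand around a raw
# stub. The channel target, operation name, and ``struct_pb2.Struct`` result
# type are placeholders for the example; real code usually receives this
# future from a generated client method.
if __name__ == "__main__":  # pragma: no cover
    import grpc
    from google.protobuf import struct_pb2

    channel = grpc.insecure_channel("localhost:8500")  # hypothetical endpoint
    stub = operations_pb2.OperationsStub(channel)
    initial = operations_pb2.Operation(name="operations/example-op")
    future = from_grpc(initial, stub, struct_pb2.Struct)
    # ``result`` blocks, re-polling GetOperation via the stub until the
    # operation completes or the timeout elapses.
    print(future.result(timeout=30))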

View file

@ -0,0 +1,215 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AsyncIO futures for long-running operations returned from Google Cloud APIs.
These futures can be used to await the result of a long-running operation
using :meth:`AsyncOperation.result`:
.. code-block:: python
operation = my_api_client.long_running_method()
result = await operation.result()
Or asynchronously using callbacks and :meth:`AsyncOperation.add_done_callback`:
.. code-block:: python
operation = my_api_client.long_running_method()
async def my_callback(future):
result = await future.result()
operation.add_done_callback(my_callback)
"""
import functools
import threading
from google.api_core import exceptions
from google.api_core import protobuf_helpers
from google.api_core.future import async_future
from google.longrunning import operations_pb2
from google.rpc import code_pb2
class AsyncOperation(async_future.AsyncFuture):
"""A Future for interacting with a Google API Long-Running Operation.
Args:
operation (google.longrunning.operations_pb2.Operation): The
initial operation.
refresh (Callable[[], ~.api_core.operation.Operation]): A callable that
returns the latest state of the operation.
cancel (Callable[[], None]): A callable that tries to cancel
the operation.
result_type (:func:`type`): The protobuf type for the operation's
result.
metadata_type (:func:`type`): The protobuf type for the operation's
metadata.
retry (google.api_core.retry.Retry): The retry configuration used
when polling. This can be used to control how often :meth:`done`
is polled. Regardless of the retry's ``deadline``, it will be
overridden by the ``timeout`` argument to :meth:`result`.
"""
def __init__(
self,
operation,
refresh,
cancel,
result_type,
metadata_type=None,
retry=async_future.DEFAULT_RETRY,
):
super().__init__(retry=retry)
self._operation = operation
self._refresh = refresh
self._cancel = cancel
self._result_type = result_type
self._metadata_type = metadata_type
self._completion_lock = threading.Lock()
# Invoke this in case the operation came back already complete.
self._set_result_from_operation()
@property
def operation(self):
"""google.longrunning.Operation: The current long-running operation."""
return self._operation
@property
def metadata(self):
"""google.protobuf.Message: the current operation metadata."""
if not self._operation.HasField("metadata"):
return None
return protobuf_helpers.from_any_pb(
self._metadata_type, self._operation.metadata
)
@classmethod
def deserialize(cls, payload):
"""Deserialize a ``google.longrunning.Operation`` protocol buffer.
Args:
payload (bytes): A serialized operation protocol buffer.
Returns:
~.operations_pb2.Operation: An Operation protobuf object.
"""
return operations_pb2.Operation.FromString(payload)
def _set_result_from_operation(self):
"""Set the result or exception from the operation if it is complete."""
# This must be done in a lock to prevent the async_future thread
# and main thread from both executing the completion logic
# at the same time.
with self._completion_lock:
# If the operation isn't complete or if the result has already been
# set, do not call set_result/set_exception again.
if not self._operation.done or self._future.done():
return
if self._operation.HasField("response"):
response = protobuf_helpers.from_any_pb(
self._result_type, self._operation.response
)
self.set_result(response)
elif self._operation.HasField("error"):
exception = exceptions.GoogleAPICallError(
self._operation.error.message,
errors=(self._operation.error,),
response=self._operation,
)
self.set_exception(exception)
else:
exception = exceptions.GoogleAPICallError(
"Unexpected state: Long-running operation had neither "
"response nor error set."
)
self.set_exception(exception)
async def _refresh_and_update(self, retry=async_future.DEFAULT_RETRY):
"""Refresh the operation and update the result if needed.
Args:
retry (google.api_core.retry.Retry): (Optional) How to retry the RPC.
"""
# If the currently cached operation is done, no need to make another
# RPC as it will not change once done.
if not self._operation.done:
self._operation = await self._refresh(retry=retry)
self._set_result_from_operation()
async def done(self, retry=async_future.DEFAULT_RETRY):
"""Checks to see if the operation is complete.
Args:
retry (google.api_core.retry.Retry): (Optional) How to retry the RPC.
Returns:
bool: True if the operation is complete, False otherwise.
"""
await self._refresh_and_update(retry)
return self._operation.done
async def cancel(self):
"""Attempt to cancel the operation.
Returns:
bool: True if the cancel RPC was made, False if the operation is
already complete.
"""
result = await self.done()
if result:
return False
else:
await self._cancel()
return True
async def cancelled(self):
"""True if the operation was cancelled."""
await self._refresh_and_update()
return (
self._operation.HasField("error")
and self._operation.error.code == code_pb2.CANCELLED
)
def from_gapic(operation, operations_client, result_type, **kwargs):
"""Create an operation future from a gapic client.
This interacts with the long-running operations `service`_ (specific
to a given API) via a gapic client.
.. _service: https://github.com/googleapis/googleapis/blob/\
050400df0fdb16f63b63e9dee53819044bffc857/\
google/longrunning/operations.proto#L38
Args:
operation (google.longrunning.operations_pb2.Operation): The operation.
operations_client (google.api_core.operations_v1.OperationsClient):
The operations client.
result_type (:func:`type`): The protobuf result type.
kwargs: Keyword args passed into the :class:`Operation` constructor.
Returns:
~.api_core.operation.Operation: The operation future to track the given
operation.
"""
refresh = functools.partial(operations_client.get_operation, operation.name)
cancel = functools.partial(operations_client.cancel_operation, operation.name)
return AsyncOperation(operation, refresh, cancel, result_type, **kwargs)
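
# --- Usage sketch (illustrative): awaiting a future built via ``from_gapic``.
# ``client`` stands in for an already-constructed OperationsAsyncClient and
# ``struct_pb2.Struct`` for the operation's real result type; both are
# placeholders for the example.
async def _example_wait(client, operation):  # pragma: no cover
    from google.protobuf import struct_pb2

    future = from_gapic(operation, client, struct_pb2.Struct)
    # ``await future.result()`` re-polls ``client.get_operation`` until the
    # operation is done, then unpacks the response (or raises the error).
    return await future.result()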

View file

@ -0,0 +1,24 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for interacting with the google.longrunning.operations meta-API."""
import sys
from google.api_core.operations_v1.operations_client import OperationsClient
__all__ = ["OperationsClient"]
if sys.version_info >= (3, 6, 0):
from google.api_core.operations_v1.operations_async_client import OperationsAsyncClient # noqa: F401
__all__.append("OperationsAsyncClient")
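
# --- Note (illustrative): callers that must run on interpreters without the
# async client can feature-detect the conditional export above, e.g.:
#
#     from google.api_core import operations_v1
#     async_cls = getattr(operations_v1, "OperationsAsyncClient", None)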

View file

@ -0,0 +1,274 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An async client for the google.longrunning.operations meta-API.
.. _Google API Style Guide:
https://cloud.google.com/apis/design/design_patterns#long_running_operations
.. _google/longrunning/operations.proto:
https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto
"""
import functools
from google.api_core import gapic_v1, page_iterator_async
from google.api_core.operations_v1 import operations_client_config
from google.longrunning import operations_pb2
class OperationsAsyncClient:
"""Async client for interacting with long-running operations.
Args:
channel (aio.Channel): The gRPC AsyncIO channel associated with the
service that implements the ``google.longrunning.operations``
interface.
client_config (dict):
A dictionary of call options for each method. If not specified,
the default configuration is used.
"""
def __init__(self, channel, client_config=operations_client_config.config):
# Create the gRPC client stub with gRPC AsyncIO channel.
self.operations_stub = operations_pb2.OperationsStub(channel)
# Create all wrapped methods using the interface configuration.
# The interface config contains all of the default settings for retry
# and timeout for each RPC method.
interfaces = client_config["interfaces"]
interface_config = interfaces["google.longrunning.Operations"]
method_configs = gapic_v1.config_async.parse_method_configs(interface_config)
self._get_operation = gapic_v1.method_async.wrap_method(
self.operations_stub.GetOperation,
default_retry=method_configs["GetOperation"].retry,
default_timeout=method_configs["GetOperation"].timeout,
)
self._list_operations = gapic_v1.method_async.wrap_method(
self.operations_stub.ListOperations,
default_retry=method_configs["ListOperations"].retry,
default_timeout=method_configs["ListOperations"].timeout,
)
self._cancel_operation = gapic_v1.method_async.wrap_method(
self.operations_stub.CancelOperation,
default_retry=method_configs["CancelOperation"].retry,
default_timeout=method_configs["CancelOperation"].timeout,
)
self._delete_operation = gapic_v1.method_async.wrap_method(
self.operations_stub.DeleteOperation,
default_retry=method_configs["DeleteOperation"].retry,
default_timeout=method_configs["DeleteOperation"].timeout,
)
async def get_operation(
self, name, retry=gapic_v1.method_async.DEFAULT, timeout=gapic_v1.method_async.DEFAULT
):
"""Gets the latest state of a long-running operation.
Clients can use this method to poll the operation result at intervals
as recommended by the API service.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsAsyncClient()
>>> name = ''
>>> response = await api.get_operation(name)
Args:
name (str): The name of the operation resource.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Returns:
google.longrunning.operations_pb2.Operation: The state of the
operation.
Raises:
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
request = operations_pb2.GetOperationRequest(name=name)
return await self._get_operation(request, retry=retry, timeout=timeout)
async def list_operations(
self,
name,
filter_,
retry=gapic_v1.method_async.DEFAULT,
timeout=gapic_v1.method_async.DEFAULT,
):
"""
Lists operations that match the specified filter in the request.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsAsyncClient()
>>> name = ''
>>>
>>> # Iterate over all results
>>> async for operation in await api.list_operations(name):
>>> # process operation
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> iterator = await api.list_operations(name)
>>> async for page in iterator.pages:
>>> for operation in page:
>>> # process operation
>>> pass
Args:
name (str): The name of the operation collection.
filter_ (str): The standard list filter.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Returns:
google.api_core.page_iterator.Iterator: An iterator that yields
:class:`google.longrunning.operations_pb2.Operation` instances.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.ListOperationsRequest(name=name, filter=filter_)
# Create the method used to fetch pages
method = functools.partial(self._list_operations, retry=retry, timeout=timeout)
iterator = page_iterator_async.AsyncGRPCIterator(
client=None,
method=method,
request=request,
items_field="operations",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
async def cancel_operation(
self, name, retry=gapic_v1.method_async.DEFAULT, timeout=gapic_v1.method_async.DEFAULT
):
"""Starts asynchronous cancellation on a long-running operation.
The server makes a best effort to cancel the operation, but success is
not guaranteed. Clients can use :meth:`get_operation` or service-
specific methods to check whether the cancellation succeeded or whether
the operation completed despite cancellation. On successful
cancellation, the operation is not deleted; instead, it becomes an
operation with an ``Operation.error`` value with a
``google.rpc.Status.code`` of ``1``, corresponding to
``Code.CANCELLED``.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsAsyncClient()
>>> name = ''
>>> await api.cancel_operation(name)
Args:
name (str): The name of the operation resource to be cancelled.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.CancelOperationRequest(name=name)
await self._cancel_operation(request, retry=retry, timeout=timeout)
async def delete_operation(
self, name, retry=gapic_v1.method_async.DEFAULT, timeout=gapic_v1.method_async.DEFAULT
):
"""Deletes a long-running operation.
This method indicates that the client is no longer interested in the
operation result. It does not cancel the operation.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsAsyncClient()
>>> name = ''
>>> await api.delete_operation(name)
Args:
name (str): The name of the operation resource to be deleted.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.DeleteOperationRequest(name=name)
await self._delete_operation(request, retry=retry, timeout=timeout)
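
# --- Usage sketch (illustrative): constructing the client over a gRPC AsyncIO
# channel (assumes a grpcio version that ships ``grpc.aio``). The insecure
# localhost endpoint is a placeholder; production code would use an
# authenticated channel, e.g. from the google-auth gRPC helpers.
async def _example(name="operations/example-op"):  # pragma: no cover
    import grpc

    async with grpc.aio.insecure_channel("localhost:8500") as channel:
        client = OperationsAsyncClient(channel)
        operation = await client.get_operation(name)
        return operation.done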

View file

@ -0,0 +1,288 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client for the google.longrunning.operations meta-API.
This is a client that deals with long-running operations that follow the
pattern outlined by the `Google API Style Guide`_.
When an API method normally takes a long time to complete, it can be designed to
return ``Operation`` to the client, and the client can use this interface to
receive the real response asynchronously by polling the operation resource.
It is not a separate service, but rather an interface implemented by a larger
service. The protocol-level definition is available at
`google/longrunning/operations.proto`_. Typically, this will be constructed
automatically by another client class to deal with operations.
.. _Google API Style Guide:
https://cloud.google.com/apis/design/design_patterns#long_running_operations
.. _google/longrunning/operations.proto:
https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto
"""
import functools
from google.api_core import gapic_v1
from google.api_core import page_iterator
from google.api_core.operations_v1 import operations_client_config
from google.longrunning import operations_pb2
class OperationsClient(object):
"""Client for interacting with long-running operations within a service.
Args:
channel (grpc.Channel): The gRPC channel associated with the service
that implements the ``google.longrunning.operations`` interface.
client_config (dict):
A dictionary of call options for each method. If not specified,
the default configuration is used.
"""
def __init__(self, channel, client_config=operations_client_config.config):
# Create the gRPC client stub.
self.operations_stub = operations_pb2.OperationsStub(channel)
# Create all wrapped methods using the interface configuration.
# The interface config contains all of the default settings for retry
# and timeout for each RPC method.
interfaces = client_config["interfaces"]
interface_config = interfaces["google.longrunning.Operations"]
method_configs = gapic_v1.config.parse_method_configs(interface_config)
self._get_operation = gapic_v1.method.wrap_method(
self.operations_stub.GetOperation,
default_retry=method_configs["GetOperation"].retry,
default_timeout=method_configs["GetOperation"].timeout,
)
self._list_operations = gapic_v1.method.wrap_method(
self.operations_stub.ListOperations,
default_retry=method_configs["ListOperations"].retry,
default_timeout=method_configs["ListOperations"].timeout,
)
self._cancel_operation = gapic_v1.method.wrap_method(
self.operations_stub.CancelOperation,
default_retry=method_configs["CancelOperation"].retry,
default_timeout=method_configs["CancelOperation"].timeout,
)
self._delete_operation = gapic_v1.method.wrap_method(
self.operations_stub.DeleteOperation,
default_retry=method_configs["DeleteOperation"].retry,
default_timeout=method_configs["DeleteOperation"].timeout,
)
# Service calls
def get_operation(
self, name, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT
):
"""Gets the latest state of a long-running operation.
Clients can use this method to poll the operation result at intervals
as recommended by the API service.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>> response = api.get_operation(name)
Args:
name (str): The name of the operation resource.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Returns:
google.longrunning.operations_pb2.Operation: The state of the
operation.
Raises:
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
request = operations_pb2.GetOperationRequest(name=name)
return self._get_operation(request, retry=retry, timeout=timeout)
def list_operations(
self,
name,
filter_,
retry=gapic_v1.method.DEFAULT,
timeout=gapic_v1.method.DEFAULT,
):
"""
Lists operations that match the specified filter in the request.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>>
>>> # Iterate over all results
>>> for operation in api.list_operations(name):
>>> # process operation
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> iterator = api.list_operations(name)
>>> for page in iterator.pages:
>>> for operation in page:
>>> # process operation
>>> pass
Args:
name (str): The name of the operation collection.
filter_ (str): The standard list filter.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Returns:
google.api_core.page_iterator.Iterator: An iterator that yields
:class:`google.longrunning.operations_pb2.Operation` instances.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.ListOperationsRequest(name=name, filter=filter_)
# Create the method used to fetch pages
method = functools.partial(self._list_operations, retry=retry, timeout=timeout)
iterator = page_iterator.GRPCIterator(
client=None,
method=method,
request=request,
items_field="operations",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def cancel_operation(
self, name, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT
):
"""Starts asynchronous cancellation on a long-running operation.
The server makes a best effort to cancel the operation, but success is
not guaranteed. Clients can use :meth:`get_operation` or service-
specific methods to check whether the cancellation succeeded or whether
the operation completed despite cancellation. On successful
cancellation, the operation is not deleted; instead, it becomes an
operation with an ``Operation.error`` value with a
``google.rpc.Status.code`` of ``1``, corresponding to
``Code.CANCELLED``.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>> api.cancel_operation(name)
Args:
name (str): The name of the operation resource to be cancelled.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.CancelOperationRequest(name=name)
self._cancel_operation(request, retry=retry, timeout=timeout)
def delete_operation(
self, name, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT
):
"""Deletes a long-running operation.
This method indicates that the client is no longer interested in the
operation result. It does not cancel the operation.
Example:
>>> from google.api_core import operations_v1
>>> api = operations_v1.OperationsClient()
>>> name = ''
>>> api.delete_operation(name)
Args:
name (str): The name of the operation resource to be deleted.
retry (google.api_core.retry.Retry): The retry strategy to use
when invoking the RPC. If unspecified, the default retry from
the client configuration will be used. If ``None``, then this
method will not retry the RPC at all.
timeout (float): The amount of time in seconds to wait for the RPC
to complete. Note that if ``retry`` is used, this timeout
applies to each individual attempt and the overall time it
takes for this method to complete may be longer. If
unspecified, the default timeout in the client
configuration is used. If ``None``, then the RPC method will
not time out.
Raises:
google.api_core.exceptions.MethodNotImplemented: If the server
does not support this method. Services are not required to
implement this method.
google.api_core.exceptions.GoogleAPICallError: If an error occurred
while invoking the RPC, the appropriate ``GoogleAPICallError``
subclass will be raised.
"""
# Create the request object.
request = operations_pb2.DeleteOperationRequest(name=name)
self._delete_operation(request, retry=retry, timeout=timeout)
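
# --- Usage sketch (illustrative): constructing the client directly. The
# insecure localhost endpoint and collection name are placeholders; in
# practice a higher-level client builds this over an authenticated channel.
if __name__ == "__main__":  # pragma: no cover
    import grpc

    channel = grpc.insecure_channel("localhost:8500")
    client = OperationsClient(channel)
    for operation in client.list_operations("operations", filter_=""):
        print(operation.name, operation.done)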

View file

@ -0,0 +1,59 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gapic configuration for the googe.longrunning.operations client."""
config = {
"interfaces": {
"google.longrunning.Operations": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 600000,
"total_timeout_millis": 600000,
}
},
"methods": {
"GetOperation": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ListOperations": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"CancelOperation": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"DeleteOperation": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
},
}
}
}
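
# --- Illustrative helper (not part of the shipped config): how the retry
# params above turn into a delay schedule under the plain "multiply and cap"
# exponential backoff those fields describe (the real retry machinery may
# also apply jitter).
def _sketch_backoff_millis(params, attempts=5):
    delay = params["initial_retry_delay_millis"]
    schedule = []
    for _ in range(attempts):
        schedule.append(delay)
        delay = min(
            delay * params["retry_delay_multiplier"],
            params["max_retry_delay_millis"],
        )
    return schedule


# _sketch_backoff_millis(
#     config["interfaces"]["google.longrunning.Operations"]["retry_params"]["default"]
# ) -> approximately [100, 130, 169, 219.7, 285.6]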

View file

@ -0,0 +1,557 @@
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iterators for paging through paged API methods.
These iterators simplify the process of paging through API responses
where the request takes a page token and the response is a list of results with
a token for the next page. See `list pagination`_ in the Google API Style Guide
for more details.
.. _list pagination:
https://cloud.google.com/apis/design/design_patterns#list_pagination
API clients that have methods that follow the list pagination pattern can
return an :class:`.Iterator`. You can use this iterator to get **all** of
the results across all pages::
>>> results_iterator = client.list_resources()
>>> list(results_iterator) # Convert to a list (consumes all values).
Or you can walk your way through items and call off the search early if
you find what you're looking for (resulting in possibly fewer requests)::
>>> for resource in results_iterator:
... print(resource.name)
... if not resource.is_valid:
... break
At any point, you may check the number of items consumed by referencing the
``num_results`` property of the iterator::
>>> for my_item in results_iterator:
... if results_iterator.num_results >= 10:
... break
When iterating, not every new item will send a request to the server.
To iterate based on each page of items (where a page corresponds to
a request)::
>>> for page in results_iterator.pages:
... print('=' * 20)
... print(' Page number: {:d}'.format(results_iterator.page_number))
... print(' Items in page: {:d}'.format(page.num_items))
... print(' First item: {!r}'.format(next(page)))
... print('Items remaining: {:d}'.format(page.remaining))
... print('Next page token: {}'.format(results_iterator.next_page_token))
====================
Page number: 1
Items in page: 1
First item: <MyItemClass at 0x7f1d3cccf690>
Items remaining: 0
Next page token: eav1OzQB0OM8rLdGXOEsyQWSG
====================
Page number: 2
Items in page: 19
First item: <MyItemClass at 0x7f1d3cccffd0>
Items remaining: 18
Next page token: None
Then, for each page you can get all the resources on that page by iterating
through it or using :func:`list`::
>>> list(page)
[
<MyItemClass at 0x7fd64a098ad0>,
<MyItemClass at 0x7fd64a098ed0>,
<MyItemClass at 0x7fd64a098e90>,
]
"""
import abc
import six
class Page(object):
"""Single page of results in an iterator.
Args:
parent (google.api_core.page_iterator.Iterator): The iterator that owns
the current page.
items (Sequence[Any]): An iterable (that also defines __len__) of items
from a raw API response.
item_to_value (Callable[google.api_core.page_iterator.Iterator, Any]):
Callable to convert an item from the type in the raw API response
into the native object. Will be called with the iterator and a
single item.
raw_page (Optional[google.protobuf.message.Message]):
The raw page response.
"""
def __init__(self, parent, items, item_to_value, raw_page=None):
self._parent = parent
self._num_items = len(items)
self._remaining = self._num_items
self._item_iter = iter(items)
self._item_to_value = item_to_value
self._raw_page = raw_page
@property
def raw_page(self):
"""google.protobuf.message.Message"""
return self._raw_page
@property
def num_items(self):
"""int: Total items in the page."""
return self._num_items
@property
def remaining(self):
"""int: Remaining items in the page."""
return self._remaining
def __iter__(self):
"""The :class:`Page` is an iterator of items."""
return self
def next(self):
"""Get the next value in the page."""
item = six.next(self._item_iter)
result = self._item_to_value(self._parent, item)
# Since we've successfully got the next value from the
# iterator, we update the number of remaining.
self._remaining -= 1
return result
# Alias needed for Python 2/3 support.
__next__ = next
def _item_to_value_identity(iterator, item):
"""An item to value transformer that returns the item un-changed."""
# pylint: disable=unused-argument
# We are conforming to the interface defined by Iterator.
return item
@six.add_metaclass(abc.ABCMeta)
class Iterator(object):
"""A generic class for iterating through API list responses.
Args:
client (google.cloud.client.Client): The API client.
item_to_value (Callable[google.api_core.page_iterator.Iterator, Any]):
Callable to convert an item from the type in the raw API response
into the native object. Will be called with the iterator and a
single item.
page_token (str): A token identifying a page in a result set to start
fetching results from.
max_results (int): The maximum number of results to fetch.
"""
def __init__(
self,
client,
item_to_value=_item_to_value_identity,
page_token=None,
max_results=None,
):
self._started = False
self.client = client
"""Optional[Any]: The client that created this iterator."""
self.item_to_value = item_to_value
"""Callable[Iterator, Any]: Callable to convert an item from the type
in the raw API response into the native object. Will be called with
the iterator and a
single item.
"""
self.max_results = max_results
"""int: The maximum number of results to fetch."""
# The attributes below will change over the life of the iterator.
self.page_number = 0
"""int: The current page of results."""
self.next_page_token = page_token
"""str: The token for the next page of results. If this is set before
the iterator starts, it effectively offsets the iterator to a
specific starting point."""
self.num_results = 0
"""int: The total number of results fetched so far."""
@property
def pages(self):
"""Iterator of pages in the response.
Returns:
types.GeneratorType[google.api_core.page_iterator.Page]: A
generator of page instances.
Raises:
ValueError: If the iterator has already been started.
"""
if self._started:
raise ValueError("Iterator has already started", self)
self._started = True
return self._page_iter(increment=True)
def _items_iter(self):
"""Iterator for each item returned."""
for page in self._page_iter(increment=False):
for item in page:
self.num_results += 1
yield item
def __iter__(self):
"""Iterator for each item returned.
Returns:
types.GeneratorType[Any]: A generator of items from the API.
Raises:
ValueError: If the iterator has already been started.
"""
if self._started:
raise ValueError("Iterator has already started", self)
self._started = True
return self._items_iter()
def _page_iter(self, increment):
"""Generator of pages of API responses.
Args:
increment (bool): Flag indicating if the total number of results
should be incremented on each page. This is useful since a page
iterator will want to increment by results per page while an
items iterator will want to increment per item.
Yields:
Page: each page of items from the API.
"""
page = self._next_page()
while page is not None:
self.page_number += 1
if increment:
self.num_results += page.num_items
yield page
page = self._next_page()
@abc.abstractmethod
def _next_page(self):
"""Get the next page in the iterator.
This does nothing and is intended to be overridden by subclasses
to return the next :class:`Page`.
Raises:
NotImplementedError: Always, this method is abstract.
"""
raise NotImplementedError
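
# --- Illustrative subclass (not part of the library): the smallest concrete
# Iterator, paging over pre-built in-memory pages, to show where
# ``_next_page`` plugs into the machinery above.
class _ListPagesIterator(Iterator):
    """Demo iterator over a sequence of item lists (one list per page)."""

    def __init__(self, pages):
        super(_ListPagesIterator, self).__init__(client=None)
        self._page_source = iter(pages)

    def _next_page(self):
        try:
            items = six.next(self._page_source)
        except StopIteration:
            return None
        return Page(self, items, self.item_to_value)


# list(_ListPagesIterator([[1, 2], [3]])) -> [1, 2, 3]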
def _do_nothing_page_start(iterator, page, response):
"""Helper to provide custom behavior after a :class:`Page` is started.
This is a do-nothing stand-in as the default value.
Args:
iterator (Iterator): An iterator that holds some request info.
page (Page): The page that was just created.
response (Any): The API response for a page.
"""
# pylint: disable=unused-argument
pass
class HTTPIterator(Iterator):
"""A generic class for iterating through HTTP/JSON API list responses.
To make an iterator work, you'll need to provide a way to convert a JSON
item returned from the API into the object of your choice (via
``item_to_value``). You also may need to specify a custom ``items_key`` so
that a given response (containing a page of results) can be parsed into an
iterable page of the actual objects you want.
Args:
client (google.cloud.client.Client): The API client.
api_request (Callable): The function to use to make API requests.
Generally, this will be
:meth:`google.cloud._http.JSONConnection.api_request`.
path (str): The method path to query for the list of items.
item_to_value (Callable[google.api_core.page_iterator.Iterator, Any]):
Callable to convert an item from the type in the JSON response into
a native object. Will be called with the iterator and a single
item.
items_key (str): The key in the API response where the list of items
can be found.
page_token (str): A token identifying a page in a result set to start
fetching results from.
max_results (int): The maximum number of results to fetch.
extra_params (dict): Extra query string parameters for the
API call.
page_start (Callable[
google.api_core.page_iterator.Iterator,
google.api_core.page_iterator.Page, dict]): Callable to provide
any special behavior after a new page has been created. Assumed
signature takes the :class:`.Iterator` that started the page,
the :class:`.Page` that was started and the dictionary containing
the page response.
next_token (str): The name of the field used in the response for page
tokens.
.. autoattribute:: pages
"""
_DEFAULT_ITEMS_KEY = "items"
_PAGE_TOKEN = "pageToken"
_MAX_RESULTS = "maxResults"
_NEXT_TOKEN = "nextPageToken"
_RESERVED_PARAMS = frozenset([_PAGE_TOKEN])
_HTTP_METHOD = "GET"
def __init__(
self,
client,
api_request,
path,
item_to_value,
items_key=_DEFAULT_ITEMS_KEY,
page_token=None,
max_results=None,
extra_params=None,
page_start=_do_nothing_page_start,
next_token=_NEXT_TOKEN,
):
super(HTTPIterator, self).__init__(
client, item_to_value, page_token=page_token, max_results=max_results
)
self.api_request = api_request
self.path = path
self._items_key = items_key
self.extra_params = extra_params
self._page_start = page_start
self._next_token = next_token
# Verify inputs / provide defaults.
if self.extra_params is None:
self.extra_params = {}
self._verify_params()
def _verify_params(self):
"""Verifies the parameters don't use any reserved parameter.
Raises:
ValueError: If a reserved parameter is used.
"""
reserved_in_use = self._RESERVED_PARAMS.intersection(self.extra_params)
if reserved_in_use:
raise ValueError("Using a reserved parameter", reserved_in_use)
def _next_page(self):
"""Get the next page in the iterator.
Returns:
Optional[Page]: The next page in the iterator or :data:`None` if
there are no pages left.
"""
if self._has_next_page():
response = self._get_next_page_response()
items = response.get(self._items_key, ())
page = Page(self, items, self.item_to_value, raw_page=response)
self._page_start(self, page, response)
self.next_page_token = response.get(self._next_token)
return page
else:
return None
def _has_next_page(self):
"""Determines whether or not there are more pages with results.
Returns:
bool: Whether the iterator has more pages.
"""
if self.page_number == 0:
return True
if self.max_results is not None:
if self.num_results >= self.max_results:
return False
return self.next_page_token is not None
def _get_query_params(self):
"""Getter for query parameters for the next request.
Returns:
dict: A dictionary of query parameters.
"""
result = {}
if self.next_page_token is not None:
result[self._PAGE_TOKEN] = self.next_page_token
if self.max_results is not None:
result[self._MAX_RESULTS] = self.max_results - self.num_results
result.update(self.extra_params)
return result
def _get_next_page_response(self):
"""Requests the next page from the path provided.
Returns:
dict: The parsed JSON response of the next page's contents.
Raises:
ValueError: If the HTTP method is not ``GET`` or ``POST``.
"""
params = self._get_query_params()
if self._HTTP_METHOD == "GET":
return self.api_request(
method=self._HTTP_METHOD, path=self.path, query_params=params
)
elif self._HTTP_METHOD == "POST":
return self.api_request(
method=self._HTTP_METHOD, path=self.path, data=params
)
else:
raise ValueError("Unexpected HTTP method", self._HTTP_METHOD)
class _GAXIterator(Iterator):
"""A generic class for iterating through Cloud gRPC APIs list responses.
Args:
client (google.cloud.client.Client): The API client.
page_iter (google.gax.PageIterator): A GAX page iterator to be wrapped
to conform to the :class:`Iterator` interface.
item_to_value (Callable[Iterator, Any]): Callable to convert an item
from the protobuf response into a native object. Will
be called with the iterator and a single item.
max_results (int): The maximum number of results to fetch.
.. autoattribute:: pages
"""
def __init__(self, client, page_iter, item_to_value, max_results=None):
super(_GAXIterator, self).__init__(
client,
item_to_value,
page_token=page_iter.page_token,
max_results=max_results,
)
self._gax_page_iter = page_iter
def _next_page(self):
"""Get the next page in the iterator.
Wraps the response from the :class:`~google.gax.PageIterator` in a
:class:`Page` instance and captures some state at each page.
Returns:
Optional[Page]: The next page in the iterator or :data:`None` if
there are no pages left.
"""
try:
items = six.next(self._gax_page_iter)
page = Page(self, items, self.item_to_value)
self.next_page_token = self._gax_page_iter.page_token or None
return page
except StopIteration:
return None
class GRPCIterator(Iterator):
"""A generic class for iterating through gRPC list responses.
.. note:: The class does not take a ``page_token`` argument because it can
just be specified in the ``request``.
Args:
client (google.cloud.client.Client): The API client. This is unused by
this class, but kept to satisfy the :class:`Iterator` interface.
method (Callable[protobuf.Message]): A bound gRPC method that should
take a single message for the request.
request (protobuf.Message): The request message.
items_field (str): The field in the response message that has the
items for the page.
item_to_value (Callable[GRPCIterator, Any]): Callable to convert an
item from the type in the JSON response into a native object. Will
be called with the iterator and a single item.
request_token_field (str): The field in the request message used to
specify the page token.
response_token_field (str): The field in the response message that has
the token for the next page.
max_results (int): The maximum number of results to fetch.
.. autoattribute:: pages
"""
_DEFAULT_REQUEST_TOKEN_FIELD = "page_token"
_DEFAULT_RESPONSE_TOKEN_FIELD = "next_page_token"
def __init__(
self,
client,
method,
request,
items_field,
item_to_value=_item_to_value_identity,
request_token_field=_DEFAULT_REQUEST_TOKEN_FIELD,
response_token_field=_DEFAULT_RESPONSE_TOKEN_FIELD,
max_results=None,
):
super(GRPCIterator, self).__init__(
client, item_to_value, max_results=max_results
)
self._method = method
self._request = request
self._items_field = items_field
self._request_token_field = request_token_field
self._response_token_field = response_token_field
def _next_page(self):
"""Get the next page in the iterator.
Returns:
Optional[Page]: The next page in the iterator or :data:`None` if
there are no pages left.
"""
if not self._has_next_page():
return None
if self.next_page_token is not None:
setattr(self._request, self._request_token_field, self.next_page_token)
response = self._method(self._request)
self.next_page_token = getattr(response, self._response_token_field)
items = getattr(response, self._items_field)
page = Page(self, items, self.item_to_value, raw_page=response)
return page
def _has_next_page(self):
"""Determines whether or not there are more pages with results.
Returns:
bool: Whether the iterator has more pages.
"""
if self.page_number == 0:
return True
if self.max_results is not None:
if self.num_results >= self.max_results:
return False
# Note: intentionally a falsy check instead of a None check. The RPC
# can return an empty string indicating no more pages.
return bool(self.next_page_token)
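# --- Illustrative sketch (editorial addition, not part of the library) ---
# Paging through a hypothetical ``ListBooks`` RPC. ``list_books`` is an
# assumed bound gRPC method and ``request`` an assumed request message with a
# ``page_token`` field; the response is assumed to carry ``books`` and
# ``next_page_token`` fields, per the list pagination pattern.
def _example_grpc_iterator_usage(client, list_books, request):
    iterator = GRPCIterator(
        client=client,
        method=list_books,
        request=request,
        items_field="books",
    )
    # ``item_to_value`` defaults to the identity, so each ``book`` is the raw
    # protobuf message taken from the response's ``books`` field.
    for book in iterator:
        print(book)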

View file

@ -0,0 +1,278 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AsyncIO iterators for paging through paged API methods.
These iterators simplify the process of paging through API responses
where the request takes a page token and the response is a list of results with
a token for the next page. See `list pagination`_ in the Google API Style Guide
for more details.
.. _list pagination:
https://cloud.google.com/apis/design/design_patterns#list_pagination
API clients that have methods that follow the list pagination pattern can
return an :class:`.AsyncIterator`:
>>> results_iterator = await client.list_resources()
Or you can walk your way through items and call off the search early if
you find what you're looking for (resulting in possibly fewer requests)::
>>> async for resource in results_iterator:
... print(resource.name)
... if not resource.is_valid:
... break
At any point, you may check the number of items consumed by referencing the
``num_results`` property of the iterator::
>>> async for my_item in results_iterator:
... if results_iterator.num_results >= 10:
... break
When iterating, not every new item will send a request to the server.
To iterate based on each page of items (where a page corresponds to
a request)::
>>> async for page in results_iterator.pages:
... print('=' * 20)
... print(' Page number: {:d}'.format(results_iterator.page_number))
... print(' Items in page: {:d}'.format(page.num_items))
... print(' First item: {!r}'.format(next(page)))
... print('Items remaining: {:d}'.format(page.remaining))
... print('Next page token: {}'.format(results_iterator.next_page_token))
====================
Page number: 1
Items in page: 1
First item: <MyItemClass at 0x7f1d3cccf690>
Items remaining: 0
Next page token: eav1OzQB0OM8rLdGXOEsyQWSG
====================
Page number: 2
Items in page: 19
First item: <MyItemClass at 0x7f1d3cccffd0>
Items remaining: 18
Next page token: None
"""
import abc
from google.api_core.page_iterator import Page
def _item_to_value_identity(iterator, item):
"""An item to value transformer that returns the item un-changed."""
# pylint: disable=unused-argument
# We are conforming to the interface defined by Iterator.
return item
class AsyncIterator(abc.ABC):
"""A generic class for iterating through API list responses.
Args:
client (google.cloud.client.Client): The API client.
item_to_value (Callable[google.api_core.page_iterator_async.AsyncIterator, Any]):
Callable to convert an item from the type in the raw API response
into the native object. Will be called with the iterator and a
single item.
page_token (str): A token identifying a page in a result set to start
fetching results from.
max_results (int): The maximum number of results to fetch.
"""
def __init__(
self,
client,
item_to_value=_item_to_value_identity,
page_token=None,
max_results=None,
):
self._started = False
self.client = client
"""Optional[Any]: The client that created this iterator."""
self.item_to_value = item_to_value
"""Callable[Iterator, Any]: Callable to convert an item from the type
in the raw API response into the native object. Will be called with
the iterator and a
single item.
"""
self.max_results = max_results
"""int: The maximum number of results to fetch."""
# The attributes below will change over the life of the iterator.
self.page_number = 0
"""int: The current page of results."""
self.next_page_token = page_token
"""str: The token for the next page of results. If this is set before
the iterator starts, it effectively offsets the iterator to a
specific starting point."""
self.num_results = 0
"""int: The total number of results fetched so far."""
@property
def pages(self):
"""Iterator of pages in the response.
Returns:
    AsyncGenerator[google.api_core.page_iterator.Page, None]: An async
    generator of page instances.
Raises:
    ValueError: If the iterator has already been started.
"""
if self._started:
raise ValueError("Iterator has already started", self)
self._started = True
return self._page_aiter(increment=True)
async def _items_aiter(self):
"""Iterator for each item returned."""
async for page in self._page_aiter(increment=False):
for item in page:
self.num_results += 1
yield item
def __aiter__(self):
"""Iterator for each item returned.
Returns:
AsyncGenerator[Any, None]: An async generator of items from the API.
Raises:
ValueError: If the iterator has already been started.
"""
if self._started:
raise ValueError("Iterator has already started", self)
self._started = True
return self._items_aiter()
async def _page_aiter(self, increment):
"""Generator of pages of API responses.
Args:
increment (bool): Flag indicating if the total number of results
should be incremented on each page. This is useful since a page
iterator will want to increment by results per page while an
items iterator will want to increment per item.
Yields:
Page: each page of items from the API.
"""
page = await self._next_page()
while page is not None:
self.page_number += 1
if increment:
self.num_results += page.num_items
yield page
page = await self._next_page()
@abc.abstractmethod
async def _next_page(self):
"""Get the next page in the iterator.
This does nothing and is intended to be overridden by subclasses
to return the next :class:`Page`.
Raises:
NotImplementedError: Always, this method is abstract.
"""
raise NotImplementedError
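# --- Illustrative sketch (editorial addition, not part of the library) ---
# The only method a concrete subclass must supply is ``_next_page``. This
# toy subclass serves pages from an in-memory list of item lists instead of
# calling a real API.
class _InMemoryAsyncIterator(AsyncIterator):
    """Serves :class:`Page` objects from ``pages``, a list of item lists."""
    def __init__(self, pages):
        super().__init__(client=None)
        self._remaining_pages = iter(pages)
    async def _next_page(self):
        try:
            items = next(self._remaining_pages)
        except StopIteration:
            return None
        return Page(self, items, self.item_to_value)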
class AsyncGRPCIterator(AsyncIterator):
"""A generic class for iterating through gRPC list responses.
.. note:: The class does not take a ``page_token`` argument because it can
just be specified in the ``request``.
Args:
client (google.cloud.client.Client): The API client. This is unused by
this class, but kept to satisfy the :class:`Iterator` interface.
method (Callable[protobuf.Message]): A bound gRPC method that should
take a single message for the request.
request (protobuf.Message): The request message.
items_field (str): The field in the response message that has the
items for the page.
item_to_value (Callable[AsyncGRPCIterator, Any]): Callable to convert an
item from the type in the JSON response into a native object. Will
be called with the iterator and a single item.
request_token_field (str): The field in the request message used to
specify the page token.
response_token_field (str): The field in the response message that has
the token for the next page.
max_results (int): The maximum number of results to fetch.
.. autoattribute:: pages
"""
_DEFAULT_REQUEST_TOKEN_FIELD = "page_token"
_DEFAULT_RESPONSE_TOKEN_FIELD = "next_page_token"
def __init__(
self,
client,
method,
request,
items_field,
item_to_value=_item_to_value_identity,
request_token_field=_DEFAULT_REQUEST_TOKEN_FIELD,
response_token_field=_DEFAULT_RESPONSE_TOKEN_FIELD,
max_results=None,
):
super().__init__(client, item_to_value, max_results=max_results)
self._method = method
self._request = request
self._items_field = items_field
self._request_token_field = request_token_field
self._response_token_field = response_token_field
async def _next_page(self):
"""Get the next page in the iterator.
Returns:
Optional[Page]: The next page in the iterator or :data:`None` if
there are no pages left.
"""
if not self._has_next_page():
return None
if self.next_page_token is not None:
setattr(self._request, self._request_token_field, self.next_page_token)
response = await self._method(self._request)
self.next_page_token = getattr(response, self._response_token_field)
items = getattr(response, self._items_field)
page = Page(self, items, self.item_to_value, raw_page=response)
return page
def _has_next_page(self):
"""Determines whether or not there are more pages with results.
Returns:
bool: Whether the iterator has more pages.
"""
if self.page_number == 0:
return True
# Note: intentionally a falsy check instead of a None check. The RPC
# can return an empty string indicating no more pages.
if self.max_results is not None:
if self.num_results >= self.max_results:
return False
return bool(self.next_page_token)
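# --- Illustrative sketch (editorial addition, not part of the library) ---
# Consuming AsyncGRPCIterator from a coroutine. ``list_books`` is an assumed
# awaitable bound gRPC method and ``request`` an assumed message using the
# default ``page_token``/``next_page_token`` field names.
async def _example_async_grpc_iterator_usage(client, list_books, request):
    iterator = AsyncGRPCIterator(
        client=client,
        method=list_books,
        request=request,
        items_field="books",
    )
    async for book in iterator:
        print(book)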

View file

@ -0,0 +1,197 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Expand and validate URL path templates.
This module provides the :func:`expand` and :func:`validate` functions for
interacting with Google-style URL `path templates`_ which are commonly used
in Google APIs for `resource names`_.
.. _path templates: https://github.com/googleapis/googleapis/blob
/57e2d376ac7ef48681554204a3ba78a414f2c533/google/api/http.proto#L212
.. _resource names: https://cloud.google.com/apis/design/resource_names
"""
from __future__ import unicode_literals
import functools
import re
import six
# Regular expression for extracting variable parts from a path template.
# The variables can be expressed as:
#
# - "*": a single-segment positional variable, for example: "books/*"
# - "**": a multi-segment positional variable, for example: "shelf/**/book/*"
# - "{name}": a single-segment wildcard named variable, for example
# "books/{name}"
# - "{name=*}: same as above.
# - "{name=**}": a multi-segment wildcard named variable, for example
# "shelf/{name=**}"
# - "{name=/path/*/**}": a multi-segment named variable with a sub-template.
_VARIABLE_RE = re.compile(
r"""
( # Capture the entire variable expression
(?P<positional>\*\*?) # Match & capture * and ** positional variables.
|
# Match & capture named variables {name}
{
(?P<name>[^/]+?)
# Optionally match and capture the named variable's template.
(?:=(?P<template>.+?))?
}
)
""",
re.VERBOSE,
)
# Segment expressions used for validating paths against a template.
_SINGLE_SEGMENT_PATTERN = r"([^/]+)"
_MULTI_SEGMENT_PATTERN = r"(.+)"
def _expand_variable_match(positional_vars, named_vars, match):
"""Expand a matched variable with its value.
Args:
positional_vars (list): A list of positional variables. This list will
be modified.
named_vars (dict): A dictionary of named variables.
match (re.Match): A regular expression match.
Returns:
str: The expanded variable to replace the match.
Raises:
ValueError: If a positional or named variable is required by the
template but not specified or if an unexpected template expression
is encountered.
"""
positional = match.group("positional")
name = match.group("name")
if name is not None:
try:
return six.text_type(named_vars[name])
except KeyError:
raise ValueError(
"Named variable '{}' not specified and needed by template "
"`{}` at position {}".format(name, match.string, match.start())
)
elif positional is not None:
try:
return six.text_type(positional_vars.pop(0))
except IndexError:
raise ValueError(
"Positional variable not specified and needed by template "
"`{}` at position {}".format(match.string, match.start())
)
else:
raise ValueError("Unknown template expression {}".format(match.group(0)))
def expand(tmpl, *args, **kwargs):
"""Expand a path template with the given variables.
.. code-block:: python
>>> expand('users/*/messages/*', 'me', '123')
users/me/messages/123
>>> expand('/v1/{name=shelves/*/books/*}', name='shelves/1/books/3')
/v1/shelves/1/books/3
Args:
tmpl (str): The path template.
args: The positional variables for the path.
kwargs: The named variables for the path.
Returns:
str: The expanded path
Raises:
ValueError: If a positional or named variable is required by the
template but not specified or if an unexpected template expression
is encountered.
"""
replacer = functools.partial(_expand_variable_match, list(args), kwargs)
return _VARIABLE_RE.sub(replacer, tmpl)
def _replace_variable_with_pattern(match):
"""Replace a variable match with a pattern that can be used to validate it.
Args:
match (re.Match): A regular expression match
Returns:
str: A regular expression pattern that can be used to validate the
variable in an expanded path.
Raises:
ValueError: If an unexpected template expression is encountered.
"""
positional = match.group("positional")
name = match.group("name")
template = match.group("template")
if name is not None:
if not template:
return _SINGLE_SEGMENT_PATTERN.format(name)
elif template == "**":
return _MULTI_SEGMENT_PATTERN.format(name)
else:
return _generate_pattern_for_template(template)
elif positional == "*":
return _SINGLE_SEGMENT_PATTERN
elif positional == "**":
return _MULTI_SEGMENT_PATTERN
else:
raise ValueError("Unknown template expression {}".format(match.group(0)))
def _generate_pattern_for_template(tmpl):
"""Generate a pattern that can validate a path template.
Args:
tmpl (str): The path template
Returns:
str: A regular expression pattern that can be used to validate an
expanded path template.
"""
return _VARIABLE_RE.sub(_replace_variable_with_pattern, tmpl)
def validate(tmpl, path):
"""Validate a path against the path template.
.. code-block:: python
>>> validate('users/*/messages/*', 'users/me/messages/123')
True
>>> validate('users/*/messages/*', 'users/me/drafts/123')
False
>>> validate('/v1/{name=shelves/*/books/*}', '/v1/shelves/1/books/3')
True
>>> validate('/v1/{name=shelves/*/books/*}', '/v1/shelves/1/tapes/3')
False
Args:
tmpl (str): The path template.
path (str): The expanded path.
Returns:
bool: True if the path matches.
"""
pattern = _generate_pattern_for_template(tmpl) + "$"
return re.match(pattern, path) is not None
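# --- Illustrative sketch (editorial addition, not part of the library) ---
# ``expand`` and ``validate`` are inverses in the sense that a path produced
# from a template always validates against that same template.
def _example_expand_then_validate():
    tmpl = "/v1/{name=shelves/*/books/*}"
    path = expand(tmpl, name="shelves/1/books/3")
    assert path == "/v1/shelves/1/books/3"
    assert validate(tmpl, path)
    assert not validate(tmpl, "/v1/shelves/1/tapes/3")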

View file

@ -0,0 +1,370 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for :mod:`protobuf`."""
import collections
import copy
import inspect
from google.protobuf import field_mask_pb2
from google.protobuf import message
from google.protobuf import wrappers_pb2
try:
from collections import abc as collections_abc
except ImportError: # Python 2.7
import collections as collections_abc
_SENTINEL = object()
_WRAPPER_TYPES = (
wrappers_pb2.BoolValue,
wrappers_pb2.BytesValue,
wrappers_pb2.DoubleValue,
wrappers_pb2.FloatValue,
wrappers_pb2.Int32Value,
wrappers_pb2.Int64Value,
wrappers_pb2.StringValue,
wrappers_pb2.UInt32Value,
wrappers_pb2.UInt64Value,
)
def from_any_pb(pb_type, any_pb):
"""Converts an ``Any`` protobuf to the specified message type.
Args:
pb_type (type): the type of the message that any_pb stores an instance
of.
any_pb (google.protobuf.any_pb2.Any): the object to be converted.
Returns:
pb_type: An instance of the pb_type message.
Raises:
TypeError: if the message could not be converted.
"""
msg = pb_type()
# Unwrap proto-plus wrapped messages.
if callable(getattr(pb_type, "pb", None)):
msg_pb = pb_type.pb(msg)
else:
msg_pb = msg
# Unpack the Any object and populate the protobuf message instance.
if not any_pb.Unpack(msg_pb):
raise TypeError(
"Could not convert {} to {}".format(
any_pb.__class__.__name__, pb_type.__name__
)
)
# Done; return the message.
return msg
def check_oneof(**kwargs):
"""Raise ValueError if more than one keyword argument is not ``None``.
Args:
kwargs (dict): The keyword arguments sent to the function.
Raises:
ValueError: If more than one entry in ``kwargs`` is not ``None``.
"""
# Sanity check: If no keyword arguments were sent, this is fine.
if not kwargs:
return
not_nones = [val for val in kwargs.values() if val is not None]
if len(not_nones) > 1:
raise ValueError(
"Only one of {fields} should be set.".format(
fields=", ".join(sorted(kwargs.keys()))
)
)
def get_messages(module):
"""Discovers all protobuf Message classes in a given import module.
Args:
module (module): A Python module; :func:`dir` will be run against this
module to find Message subclasses.
Returns:
dict[str, google.protobuf.message.Message]: A dictionary with the
Message class names as keys, and the Message subclasses themselves
as values.
"""
answer = collections.OrderedDict()
for name in dir(module):
candidate = getattr(module, name)
if inspect.isclass(candidate) and issubclass(candidate, message.Message):
answer[name] = candidate
return answer
def _resolve_subkeys(key, separator="."):
"""Resolve a potentially nested key.
If the key contains the ``separator`` (e.g. ``.``) then the key will be
split on the first instance of the subkey::
>>> _resolve_subkeys('a.b.c')
('a', 'b.c')
>>> _resolve_subkeys('d|e|f', separator='|')
('d', 'e|f')
If not, the subkey will be :data:`None`::
>>> _resolve_subkeys('foo')
('foo', None)
Args:
key (str): A string that may or may not contain the separator.
separator (str): The namespace separator. Defaults to `.`.
Returns:
Tuple[str, str]: The key and subkey(s).
"""
parts = key.split(separator, 1)
if len(parts) > 1:
return parts
else:
return parts[0], None
def get(msg_or_dict, key, default=_SENTINEL):
"""Retrieve a key's value from a protobuf Message or dictionary.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to retrieve from the object.
default (Any): If the key is not present on the object, and a default
is set, returns that default instead. A type-appropriate falsy
default is generally recommended, as protobuf messages almost
always have default values for unset values and it is not always
possible to tell the difference between a falsy value and an
unset one. If no default is set then :class:`KeyError` will be
raised if the key is not present in the object.
Returns:
Any: The return value from the underlying Message or dict.
Raises:
KeyError: If the key is not found. Note that, for unset values,
messages and dictionaries may not have consistent behavior.
TypeError: If ``msg_or_dict`` is not a Message or Mapping.
"""
# We may need to get a nested key. Resolve this.
key, subkey = _resolve_subkeys(key)
# Attempt to get the value from the two types of objects we know about.
# If we get something else, complain.
if isinstance(msg_or_dict, message.Message):
answer = getattr(msg_or_dict, key, default)
elif isinstance(msg_or_dict, collections_abc.Mapping):
answer = msg_or_dict.get(key, default)
else:
raise TypeError(
"get() expected a dict or protobuf message, got {!r}.".format(
type(msg_or_dict)
)
)
# If the object we got back is our sentinel, raise KeyError; this is
# a "not found" case.
if answer is _SENTINEL:
raise KeyError(key)
# If a subkey exists, call this method recursively against the answer.
if subkey is not None and answer is not default:
return get(answer, subkey, default=default)
return answer
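# --- Illustrative sketch (editorial addition, not part of the library) ---
# ``get`` reads plain mappings and protobuf messages uniformly, and dotted
# keys traverse nested structures.
def _example_get_usage():
    data = {"book": {"title": "Hamlet", "pages": 432}}
    assert get(data, "book.title") == "Hamlet"
    assert get(data, "book.isbn", default=None) is None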
def _set_field_on_message(msg, key, value):
"""Set helper for protobuf Messages."""
# Attempt to set the value on the types of objects we know how to deal
# with.
if isinstance(value, (collections_abc.MutableSequence, tuple)):
# Clear the existing repeated protobuf message of any elements
# currently inside it.
while getattr(msg, key):
getattr(msg, key).pop()
# Write our new elements to the repeated field.
for item in value:
if isinstance(item, collections_abc.Mapping):
getattr(msg, key).add(**item)
else:
# protobuf's RepeatedCompositeContainer doesn't support
# append.
getattr(msg, key).extend([item])
elif isinstance(value, collections_abc.Mapping):
# Assign the dictionary values to the protobuf message.
for item_key, item_value in value.items():
set(getattr(msg, key), item_key, item_value)
elif isinstance(value, message.Message):
getattr(msg, key).CopyFrom(value)
else:
setattr(msg, key, value)
def set(msg_or_dict, key, value):
"""Set a key's value on a protobuf Message or dictionary.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to set.
value (Any): The value to set.
Raises:
TypeError: If ``msg_or_dict`` is not a Message or dictionary.
"""
# Sanity check: Is our target object valid?
if not isinstance(msg_or_dict, (collections_abc.MutableMapping, message.Message)):
raise TypeError(
"set() expected a dict or protobuf message, got {!r}.".format(
type(msg_or_dict)
)
)
# We may be setting a nested key. Resolve this.
basekey, subkey = _resolve_subkeys(key)
# If a subkey exists, then get that object and call this method
# recursively against it using the subkey.
if subkey is not None:
if isinstance(msg_or_dict, collections_abc.MutableMapping):
msg_or_dict.setdefault(basekey, {})
set(get(msg_or_dict, basekey), subkey, value)
return
if isinstance(msg_or_dict, collections_abc.MutableMapping):
msg_or_dict[key] = value
else:
_set_field_on_message(msg_or_dict, key, value)
def setdefault(msg_or_dict, key, value):
"""Set the key on a protobuf Message or dictionary to a given value if the
current value is falsy.
Because protobuf Messages do not distinguish between unset values and
falsy ones particularly well (by design), this method treats any falsy
value (e.g. 0, empty list) as a target to be overwritten, on both Messages
and dictionaries.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key on the object in question.
value (Any): The value to set.
Raises:
TypeError: If ``msg_or_dict`` is not a Message or dictionary.
"""
if not get(msg_or_dict, key, default=None):
set(msg_or_dict, key, value)
def field_mask(original, modified):
"""Create a field mask by comparing two messages.
Args:
original (~google.protobuf.message.Message): the original message.
If set to None, this field will be interpreted as an empty
message.
modified (~google.protobuf.message.Message): the modified message.
If set to None, this field will be interpreted as an empty
message.
Returns:
google.protobuf.field_mask_pb2.FieldMask: field mask that contains
the list of field names that have different values between the two
messages. If the messages are equivalent, then the field mask is empty.
Raises:
ValueError: If the ``original`` or ``modified`` are not the same type.
"""
if original is None and modified is None:
return field_mask_pb2.FieldMask()
if original is None and modified is not None:
original = copy.deepcopy(modified)
original.Clear()
if modified is None and original is not None:
modified = copy.deepcopy(original)
modified.Clear()
if type(original) != type(modified):
raise ValueError(
"expected that both original and modified should be of the "
'same type, received "{!r}" and "{!r}".'.format(
type(original), type(modified)
)
)
return field_mask_pb2.FieldMask(paths=_field_mask_helper(original, modified))
def _field_mask_helper(original, modified, current=""):
answer = []
for name in original.DESCRIPTOR.fields_by_name:
field_path = _get_path(current, name)
original_val = getattr(original, name)
modified_val = getattr(modified, name)
if _is_message(original_val) or _is_message(modified_val):
if original_val != modified_val:
# Wrapper types do not need to include the .value part of the
# path.
if _is_wrapper(original_val) or _is_wrapper(modified_val):
answer.append(field_path)
elif not modified_val.ListFields():
answer.append(field_path)
else:
answer.extend(
_field_mask_helper(original_val, modified_val, field_path)
)
else:
if original_val != modified_val:
answer.append(field_path)
return answer
def _get_path(current, name):
if not current:
return name
return "%s.%s" % (current, name)
def _is_message(value):
return isinstance(value, message.Message)
def _is_wrapper(value):
return type(value) in _WRAPPER_TYPES
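# --- Illustrative sketch (editorial addition, not part of the library) ---
# Comparing two well-known-type messages: only the fields whose values
# differ end up in the mask.
def _example_field_mask():
    from google.protobuf import timestamp_pb2
    original = timestamp_pb2.Timestamp(seconds=100)
    modified = timestamp_pb2.Timestamp(seconds=200)
    assert list(field_mask(original, modified).paths) == ["seconds"]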

View file

@ -0,0 +1,364 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for retrying functions with exponential back-off.
The :class:`Retry` decorator can be used to retry functions that raise
exceptions using exponential backoff. Because an exponential sleep algorithm is
used, the retry is limited by a `deadline`. The deadline is the maximum amount
of time a method can block. This is used instead of total number of retries
because it is difficult to ascertain the amount of time a function can block
when using total number of retries and exponential backoff.
By default, this decorator will retry transient
API errors (see :func:`if_transient_error`). For example:
.. code-block:: python
@retry.Retry()
def call_flaky_rpc():
return client.flaky_rpc()
# Will retry flaky_rpc() if it raises transient API errors.
result = call_flaky_rpc()
You can pass a custom predicate to retry on different exceptions, such as
waiting for an eventually consistent item to be available:
.. code-block:: python
@retry.Retry(predicate=if_exception_type(exceptions.NotFound))
def check_if_exists():
return client.does_thing_exist()
is_available = check_if_exists()
Some client library methods apply retry automatically. These methods can accept
a ``retry`` parameter that allows you to configure the behavior:
.. code-block:: python
my_retry = retry.Retry(deadline=60)
result = client.some_method(retry=my_retry)
"""
from __future__ import unicode_literals
import datetime
import functools
import logging
import random
import time
import six
from google.api_core import datetime_helpers
from google.api_core import exceptions
from google.api_core import general_helpers
_LOGGER = logging.getLogger(__name__)
_DEFAULT_INITIAL_DELAY = 1.0 # seconds
_DEFAULT_MAXIMUM_DELAY = 60.0 # seconds
_DEFAULT_DELAY_MULTIPLIER = 2.0
_DEFAULT_DEADLINE = 60.0 * 2.0 # seconds
def if_exception_type(*exception_types):
"""Creates a predicate to check if the exception is of a given type.
Args:
exception_types (Sequence[:func:`type`]): The exception types to check
for.
Returns:
Callable[Exception]: A predicate that returns True if the provided
exception is of the given type(s).
"""
def if_exception_type_predicate(exception):
"""Bound predicate for checking an exception type."""
return isinstance(exception, exception_types)
return if_exception_type_predicate
# pylint: disable=invalid-name
# Pylint sees this as a constant, but it is also an alias that should be
# considered a function.
if_transient_error = if_exception_type(
exceptions.InternalServerError,
exceptions.TooManyRequests,
exceptions.ServiceUnavailable,
)
"""A predicate that checks if an exception is a transient API error.
The following server errors are considered transient:
- :class:`google.api_core.exceptions.InternalServerError` - HTTP 500, gRPC
``INTERNAL(13)`` and its subclasses.
- :class:`google.api_core.exceptions.TooManyRequests` - HTTP 429
- :class:`google.api_core.exceptions.ServiceUnavailable` - HTTP 503
- :class:`google.api_core.exceptions.ResourceExhausted` - gRPC
``RESOURCE_EXHAUSTED(8)``
"""
# pylint: enable=invalid-name
def exponential_sleep_generator(initial, maximum, multiplier=_DEFAULT_DELAY_MULTIPLIER):
"""Generates sleep intervals based on the exponential back-off algorithm.
This implements the `Truncated Exponential Back-off`_ algorithm.
.. _Truncated Exponential Back-off:
https://cloud.google.com/storage/docs/exponential-backoff
Args:
initial (float): The minimum amount of time to delay. This must
be greater than 0.
maximum (float): The maximum amount of time to delay.
multiplier (float): The multiplier applied to the delay.
Yields:
float: successive sleep intervals.
"""
delay = initial
while True:
# Introduce jitter by yielding a delay that is uniformly distributed
# to average out to the delay time.
yield min(random.uniform(0.0, delay * 2.0), maximum)
delay = delay * multiplier
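# --- Illustrative sketch (editorial addition, not part of the library) ---
# Drawing the first few intervals: each value is uniform on [0, delay * 2]
# and capped at ``maximum``, so successive draws average out to the nominal
# exponential delays 1, 2, 4, ... seconds.
def _example_sleep_intervals():
    import itertools
    intervals = list(itertools.islice(exponential_sleep_generator(1.0, 60.0), 5))
    assert all(0.0 <= interval <= 60.0 for interval in intervals)
    return intervals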
def retry_target(target, predicate, sleep_generator, deadline, on_error=None):
"""Call a function and retry if it fails.
This is the lowest-level retry helper. Generally, you'll use the
higher-level retry helper :class:`Retry`.
Args:
target(Callable): The function to call and retry. This must be a
nullary function - apply arguments with `functools.partial`.
predicate (Callable[Exception]): A callable used to determine if an
exception raised by the target should be considered retryable.
It should return True to retry or False otherwise.
sleep_generator (Iterable[float]): An infinite iterator that determines
how long to sleep between retries.
deadline (float): How long to keep retrying the target. The last sleep
period is shortened as necessary, so that the last retry runs at
``deadline`` (and not considerably beyond it).
on_error (Callable[Exception]): A function to call while processing a
retryable exception. Any error raised by this function will *not*
be caught.
Returns:
Any: the return value of the target function.
Raises:
google.api_core.RetryError: If the deadline is exceeded while retrying.
ValueError: If the sleep generator stops yielding values.
Exception: If the target raises an exception that isn't retryable.
"""
if deadline is not None:
deadline_datetime = datetime_helpers.utcnow() + datetime.timedelta(
seconds=deadline
)
else:
deadline_datetime = None
last_exc = None
for sleep in sleep_generator:
try:
return target()
# pylint: disable=broad-except
# This function explicitly must deal with broad exceptions.
except Exception as exc:
if not predicate(exc):
raise
last_exc = exc
if on_error is not None:
on_error(exc)
now = datetime_helpers.utcnow()
if deadline_datetime is not None:
if deadline_datetime <= now:
six.raise_from(
exceptions.RetryError(
"Deadline of {:.1f}s exceeded while calling {}".format(
deadline, target
),
last_exc,
),
last_exc,
)
else:
time_to_deadline = (deadline_datetime - now).total_seconds()
sleep = min(time_to_deadline, sleep)
_LOGGER.debug(
"Retrying due to {}, sleeping {:.1f}s ...".format(last_exc, sleep)
)
time.sleep(sleep)
raise ValueError("Sleep generator stopped yielding sleep values.")
@six.python_2_unicode_compatible
class Retry(object):
"""Exponential retry decorator.
This class is a decorator used to add exponential back-off retry behavior
to an RPC call.
Although the default behavior is to retry transient API errors, a
different predicate can be provided to retry other exceptions.
Args:
predicate (Callable[Exception]): A callable that should return ``True``
if the given exception is retryable.
initial (float): The minimum amount of time to delay in seconds. This
    must be greater than 0.
maximum (float): The maximum amount of time to delay in seconds.
multiplier (float): The multiplier applied to the delay.
deadline (float): How long to keep retrying in seconds. The last sleep
    period is shortened as necessary, so that the last retry runs at
    ``deadline`` (and not considerably beyond it).
on_error (Callable[Exception]): A function to call while processing a
    retryable exception. Any error raised by this function will *not*
    be caught.
"""
def __init__(
self,
predicate=if_transient_error,
initial=_DEFAULT_INITIAL_DELAY,
maximum=_DEFAULT_MAXIMUM_DELAY,
multiplier=_DEFAULT_DELAY_MULTIPLIER,
deadline=_DEFAULT_DEADLINE,
on_error=None,
):
self._predicate = predicate
self._initial = initial
self._multiplier = multiplier
self._maximum = maximum
self._deadline = deadline
self._on_error = on_error
def __call__(self, func, on_error=None):
"""Wrap a callable with retry behavior.
Args:
func (Callable): The callable to add retry behavior to.
on_error (Callable[Exception]): A function to call while processing
a retryable exception. Any error raised by this function will
*not* be caught.
Returns:
Callable: A callable that will invoke ``func`` with retry
behavior.
"""
if self._on_error is not None:
on_error = self._on_error
@general_helpers.wraps(func)
def retry_wrapped_func(*args, **kwargs):
"""A wrapper that calls target function with retry."""
target = functools.partial(func, *args, **kwargs)
sleep_generator = exponential_sleep_generator(
self._initial, self._maximum, multiplier=self._multiplier
)
return retry_target(
target,
self._predicate,
sleep_generator,
self._deadline,
on_error=on_error,
)
return retry_wrapped_func
@property
def deadline(self):
return self._deadline
def with_deadline(self, deadline):
"""Return a copy of this retry with the given deadline.
Args:
deadline (float): How long to keep retrying.
Returns:
Retry: A new retry instance with the given deadline.
"""
return Retry(
predicate=self._predicate,
initial=self._initial,
maximum=self._maximum,
multiplier=self._multiplier,
deadline=deadline,
on_error=self._on_error,
)
def with_predicate(self, predicate):
"""Return a copy of this retry with the given predicate.
Args:
predicate (Callable[Exception]): A callable that should return
``True`` if the given exception is retryable.
Returns:
Retry: A new retry instance with the given predicate.
"""
return Retry(
predicate=predicate,
initial=self._initial,
maximum=self._maximum,
multiplier=self._multiplier,
deadline=self._deadline,
on_error=self._on_error,
)
def with_delay(self, initial=None, maximum=None, multiplier=None):
"""Return a copy of this retry with the given delay options.
Args:
initial (float): The minimum amount of time to delay. This must
be greater than 0.
maximum (float): The maximum amount of time to delay.
multiplier (float): The multiplier applied to the delay.
Returns:
Retry: A new retry instance with the given delay options.
"""
return Retry(
predicate=self._predicate,
initial=initial if initial is not None else self._initial,
maximum=maximum if maximum is not None else self._maximum,
multiplier=multiplier if multiplier is not None else self._multiplier,
deadline=self._deadline,
on_error=self._on_error,
)
def __str__(self):
return (
"<Retry predicate={}, initial={:.1f}, maximum={:.1f}, "
"multiplier={:.1f}, deadline={:.1f}, on_error={}>".format(
self._predicate,
self._initial,
self._maximum,
self._multiplier,
self._deadline,
self._on_error,
)
)
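# --- Illustrative sketch (editorial addition, not part of the library) ---
# The ``with_*`` methods return modified copies, so one base policy can be
# specialized per call site without mutating the original.
def _example_retry_policies():
    base = Retry(deadline=60.0)
    patient = base.with_deadline(600.0)
    tolerant = base.with_predicate(if_exception_type(exceptions.NotFound))
    return base, patient, tolerant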

View file

@ -0,0 +1,282 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for retrying coroutine functions with exponential back-off.
The :class:`AsyncRetry` decorator shares most functionality and behavior with
:class:`Retry`, but supports coroutine functions. Please refer to description
of :class:`Retry` for more details.
By default, this decorator will retry transient
API errors (see :func:`if_transient_error`). For example:
.. code-block:: python
@retry_async.AsyncRetry()
async def call_flaky_rpc():
return await client.flaky_rpc()
# Will retry flaky_rpc() if it raises transient API errors.
result = await call_flaky_rpc()
You can pass a custom predicate to retry on different exceptions, such as
waiting for an eventually consistent item to be available:
.. code-block:: python
@retry_async.AsyncRetry(predicate=retry_async.if_exception_type(exceptions.NotFound))
async def check_if_exists():
return await client.does_thing_exist()
is_available = await check_if_exists()
Some client library methods apply retry automatically. These methods can accept
a ``retry`` parameter that allows you to configure the behavior:
.. code-block:: python
my_retry = retry_async.AsyncRetry(deadline=60)
result = await client.some_method(retry=my_retry)
"""
import asyncio
import datetime
import functools
import logging
from google.api_core import datetime_helpers, exceptions
from google.api_core.retry import (exponential_sleep_generator, # noqa: F401
if_exception_type, if_transient_error)
_LOGGER = logging.getLogger(__name__)
_DEFAULT_INITIAL_DELAY = 1.0 # seconds
_DEFAULT_MAXIMUM_DELAY = 60.0 # seconds
_DEFAULT_DELAY_MULTIPLIER = 2.0
_DEFAULT_DEADLINE = 60.0 * 2.0 # seconds
async def retry_target(target, predicate, sleep_generator, deadline, on_error=None):
"""Call a function and retry if it fails.
This is the lowest-level retry helper. Generally, you'll use the
higher-level retry helper :class:`Retry`.
Args:
target(Callable): The function to call and retry. This must be a
nullary function - apply arguments with `functools.partial`.
predicate (Callable[Exception]): A callable used to determine if an
exception raised by the target should be considered retryable.
It should return True to retry or False otherwise.
sleep_generator (Iterable[float]): An infinite iterator that determines
how long to sleep between retries.
deadline (float): How long to keep retrying the target. The last sleep
period is shortened as necessary, so that the last retry runs at
``deadline`` (and not considerably beyond it).
on_error (Callable[Exception]): A function to call while processing a
retryable exception. Any error raised by this function will *not*
be caught.
Returns:
Any: the return value of the target function.
Raises:
google.api_core.RetryError: If the deadline is exceeded while retrying.
ValueError: If the sleep generator stops yielding values.
Exception: If the target raises an exception that isn't retryable.
"""
deadline_dt = (datetime_helpers.utcnow() + datetime.timedelta(seconds=deadline)) if deadline else None
last_exc = None
for sleep in sleep_generator:
try:
if not deadline_dt:
return await target()
else:
return await asyncio.wait_for(
target(),
timeout=(deadline_dt - datetime_helpers.utcnow()).total_seconds()
)
# pylint: disable=broad-except
# This function explicitly must deal with broad exceptions.
except Exception as exc:
if not predicate(exc) and not isinstance(exc, asyncio.TimeoutError):
raise
last_exc = exc
if on_error is not None:
on_error(exc)
now = datetime_helpers.utcnow()
if deadline_dt:
if deadline_dt <= now:
# Chain the raised RetryError to the root-cause exception, which
# helps observability and debuggability.
raise exceptions.RetryError(
"Deadline of {:.1f}s exceeded while calling {}".format(
deadline, target
),
last_exc,
) from last_exc
else:
time_to_deadline = (deadline_dt - now).total_seconds()
sleep = min(time_to_deadline, sleep)
_LOGGER.debug(
"Retrying due to {}, sleeping {:.1f}s ...".format(last_exc, sleep)
)
await asyncio.sleep(sleep)
raise ValueError("Sleep generator stopped yielding sleep values.")
class AsyncRetry:
"""Exponential retry decorator for async functions.
This class is a decorator used to add exponential back-off retry behavior
to an RPC call.
Although the default behavior is to retry transient API errors, a
different predicate can be provided to retry other exceptions.
Args:
predicate (Callable[Exception]): A callable that should return ``True``
if the given exception is retryable.
initial (float): The minimum amount of time to delay in seconds. This
    must be greater than 0.
maximum (float): The maximum amount of time to delay in seconds.
multiplier (float): The multiplier applied to the delay.
deadline (float): How long to keep retrying in seconds. The last sleep
period is shortened as necessary, so that the last retry runs at
``deadline`` (and not considerably beyond it).
on_error (Callable[Exception]): A function to call while processing
a retryable exception. Any error raised by this function will
*not* be caught.
"""
def __init__(
self,
predicate=if_transient_error,
initial=_DEFAULT_INITIAL_DELAY,
maximum=_DEFAULT_MAXIMUM_DELAY,
multiplier=_DEFAULT_DELAY_MULTIPLIER,
deadline=_DEFAULT_DEADLINE,
on_error=None,
):
self._predicate = predicate
self._initial = initial
self._multiplier = multiplier
self._maximum = maximum
self._deadline = deadline
self._on_error = on_error
def __call__(self, func, on_error=None):
"""Wrap a callable with retry behavior.
Args:
func (Callable): The callable to add retry behavior to.
on_error (Callable[Exception]): A function to call while processing
a retryable exception. Any error raised by this function will
*not* be caught.
Returns:
Callable: A callable that will invoke ``func`` with retry
behavior.
"""
if self._on_error is not None:
on_error = self._on_error
@functools.wraps(func)
async def retry_wrapped_func(*args, **kwargs):
"""A wrapper that calls target function with retry."""
target = functools.partial(func, *args, **kwargs)
sleep_generator = exponential_sleep_generator(
self._initial, self._maximum, multiplier=self._multiplier
)
return await retry_target(
target,
self._predicate,
sleep_generator,
self._deadline,
on_error=on_error,
)
return retry_wrapped_func
def _replace(self,
predicate=None,
initial=None,
maximum=None,
multiplier=None,
deadline=None,
on_error=None):
return AsyncRetry(
predicate=predicate or self._predicate,
initial=initial or self._initial,
maximum=maximum or self._maximum,
multiplier=multiplier or self._multiplier,
deadline=deadline or self._deadline,
on_error=on_error or self._on_error,
)
def with_deadline(self, deadline):
"""Return a copy of this retry with the given deadline.
Args:
deadline (float): How long to keep retrying.
Returns:
AsyncRetry: A new retry instance with the given deadline.
"""
return self._replace(deadline=deadline)
def with_predicate(self, predicate):
"""Return a copy of this retry with the given predicate.
Args:
predicate (Callable[Exception]): A callable that should return
``True`` if the given exception is retryable.
Returns:
AsyncRetry: A new retry instance with the given predicate.
"""
return self._replace(predicate=predicate)
def with_delay(self, initial=None, maximum=None, multiplier=None):
"""Return a copy of this retry with the given delay options.
Args:
initial (float): The minimum amount of time to delay. This must
    be greater than 0.
maximum (float): The maximum amount of time to delay.
multiplier (float): The multiplier applied to the delay.
Returns:
AsyncRetry: A new retry instance with the given delay options.
"""
return self._replace(initial=initial, maximum=maximum, multiplier=multiplier)
def __str__(self):
return (
"<AsyncRetry predicate={}, initial={:.1f}, maximum={:.1f}, "
"multiplier={:.1f}, deadline={:.1f}, on_error={}>".format(
self._predicate,
self._initial,
self._maximum,
self._multiplier,
self._deadline,
self._on_error,
)
)
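# --- Illustrative sketch (editorial addition, not part of the library) ---
# Besides decorator syntax, an AsyncRetry instance can wrap an existing
# coroutine function directly; ``fetch_resource`` is an assumed coroutine
# function taking a resource name.
async def _example_async_retry(fetch_resource, name):
    policy = AsyncRetry(deadline=30.0)
    fetch_with_retry = policy(fetch_resource)
    return await fetch_with_retry(name)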

View file

@ -0,0 +1,224 @@
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decorators for applying timeout arguments to functions.
These decorators are used to wrap API methods to apply either a constant
or exponential timeout argument.
For example, imagine an API method that can take a while to return results,
such as one that might block until a resource is ready:
.. code-block:: python
def is_thing_ready(timeout=None):
response = requests.get('https://example.com/is_thing_ready')
response.raise_for_status()
return response.json()
This module allows a function like this to be wrapped so that timeouts are
automatically determined, for example:
.. code-block:: python
timeout_ = timeout.ExponentialTimeout()
is_thing_ready_with_timeout = timeout_(is_thing_ready)
for n in range(10):
    try:
        is_thing_ready_with_timeout()
    except Exception:
        pass
In this example the first call to ``is_thing_ready`` will have a relatively
small timeout (like 1 second). If the resource is available and the request
completes quickly, the loop exits. But, if the resource isn't yet available
and the request times out, it'll be retried - this time with a larger timeout.
In the broader context these decorators are typically combined with
:mod:`google.api_core.retry` to implement API methods with a signature that
matches ``api_method(request, timeout=None, retry=None)``.
"""
from __future__ import unicode_literals
import datetime
import six
from google.api_core import datetime_helpers
from google.api_core import general_helpers
_DEFAULT_INITIAL_TIMEOUT = 5.0 # seconds
_DEFAULT_MAXIMUM_TIMEOUT = 30.0 # seconds
_DEFAULT_TIMEOUT_MULTIPLIER = 2.0
# If specified, must be in seconds. If none, deadline is not used in the
# timeout calculation.
_DEFAULT_DEADLINE = None
@six.python_2_unicode_compatible
class ConstantTimeout(object):
"""A decorator that adds a constant timeout argument.
This is effectively equivalent to
``functools.partial(func, timeout=timeout)``.
Args:
timeout (Optional[float]): the timeout (in seconds) to apply to the
wrapped function. If `None`, the target function is expected to
never timeout.
"""
def __init__(self, timeout=None):
self._timeout = timeout
def __call__(self, func):
"""Apply the timeout decorator.
Args:
func (Callable): The function to apply the timeout argument to.
This function must accept a timeout keyword argument.
Returns:
Callable: The wrapped function.
"""
@general_helpers.wraps(func)
def func_with_timeout(*args, **kwargs):
"""Wrapped function that adds timeout."""
kwargs["timeout"] = self._timeout
return func(*args, **kwargs)
return func_with_timeout
def __str__(self):
return "<ConstantTimeout timeout={:.1f}>".format(self._timeout)
def _exponential_timeout_generator(initial, maximum, multiplier, deadline):
"""A generator that yields exponential timeout values.
Args:
initial (float): The initial timeout.
maximum (float): The maximum timeout.
multiplier (float): The multiplier applied to the timeout.
deadline (float): The overall deadline across all invocations.
Yields:
float: A timeout value.
"""
if deadline is not None:
deadline_datetime = datetime_helpers.utcnow() + datetime.timedelta(
seconds=deadline
)
else:
deadline_datetime = datetime.datetime.max
timeout = initial
while True:
now = datetime_helpers.utcnow()
yield min(
# The calculated timeout based on invocations.
timeout,
# The set maximum timeout.
maximum,
# The remaining time before the deadline is reached.
(deadline_datetime - now).total_seconds(),
)
timeout = timeout * multiplier
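# --- Illustrative sketch (editorial addition, not part of the library) ---
# With a distant deadline the generator yields 5.0, 10.0, 20.0, 30.0, 30.0
# for the default initial/maximum/multiplier values: the timeout doubles per
# invocation until capped by ``maximum``.
def _example_timeout_values():
    generator = _exponential_timeout_generator(5.0, 30.0, 2.0, deadline=3600.0)
    return [next(generator) for _ in range(5)]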
@six.python_2_unicode_compatible
class ExponentialTimeout(object):
"""A decorator that adds an exponentially increasing timeout argument.
This is useful if a function is called multiple times. Each time the
function is called this decorator will calculate a new timeout parameter
based on the number of times the function has been called.
For example:
.. code-block:: python
    timeout_ = timeout.ExponentialTimeout()
    is_thing_ready_with_timeout = timeout_(is_thing_ready)
Args:
initial (float): The initial timeout to pass.
maximum (float): The maximum timeout for any one call.
multiplier (float): The multiplier applied to the timeout for each
invocation.
deadline (Optional[float]): The overall deadline across all
invocations. This is used to prevent a very large calculated
timeout from pushing the overall execution time over the deadline.
This is especially useful in conjunction with
:mod:`google.api_core.retry`. If ``None``, the timeouts will not
be adjusted to accommodate an overall deadline.
"""
def __init__(
self,
initial=_DEFAULT_INITIAL_TIMEOUT,
maximum=_DEFAULT_MAXIMUM_TIMEOUT,
multiplier=_DEFAULT_TIMEOUT_MULTIPLIER,
deadline=_DEFAULT_DEADLINE,
):
self._initial = initial
self._maximum = maximum
self._multiplier = multiplier
self._deadline = deadline
def with_deadline(self, deadline):
"""Return a copy of this teimout with the given deadline.
Args:
deadline (float): The overall deadline across all invocations.
Returns:
ExponentialTimeout: A new instance with the given deadline.
"""
return ExponentialTimeout(
initial=self._initial,
maximum=self._maximum,
multiplier=self._multiplier,
deadline=deadline,
)
def __call__(self, func):
"""Apply the timeout decorator.
Args:
func (Callable): The function to apply the timeout argument to.
This function must accept a timeout keyword argument.
Returns:
Callable: The wrapped function.
"""
timeouts = _exponential_timeout_generator(
self._initial, self._maximum, self._multiplier, self._deadline
)
@general_helpers.wraps(func)
def func_with_timeout(*args, **kwargs):
"""Wrapped function that adds timeout."""
kwargs["timeout"] = next(timeouts)
return func(*args, **kwargs)
return func_with_timeout
def __str__(self):
return (
"<ExponentialTimeout initial={:.1f}, maximum={:.1f}, "
"multiplier={:.1f}, deadline={:.1f}>".format(
self._initial, self._maximum, self._multiplier, self._deadline
)
)

View file

@ -0,0 +1,15 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "1.22.4"