Added delete option to database storage.

Batuhan Berk Başoğlu 2020-10-12 12:10:01 -04:00
parent 308604a33c
commit 963b5bc68b
1868 changed files with 192402 additions and 13278 deletions


@@ -0,0 +1,27 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set default logging handler to avoid "No handler found" warnings.
import logging

try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())
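
Since the package only installs a NullHandler, here is a minimal sketch of how an application could opt in to seeing the library's log output (assuming the package logger is named "googleapiclient"):

import logging

logging.basicConfig(level=logging.INFO)
logging.getLogger("googleapiclient").setLevel(logging.DEBUG)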


@@ -0,0 +1,162 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for authentication using oauth2client or google-auth."""
import httplib2
try:
import google.auth
import google.auth.credentials
HAS_GOOGLE_AUTH = True
except ImportError: # pragma: NO COVER
HAS_GOOGLE_AUTH = False
try:
import google_auth_httplib2
except ImportError: # pragma: NO COVER
google_auth_httplib2 = None
try:
import oauth2client
import oauth2client.client
HAS_OAUTH2CLIENT = True
except ImportError: # pragma: NO COVER
HAS_OAUTH2CLIENT = False
def credentials_from_file(filename, scopes=None, quota_project_id=None):
"""Returns credentials loaded from a file."""
if HAS_GOOGLE_AUTH:
credentials, _ = google.auth.load_credentials_from_file(filename, scopes=scopes, quota_project_id=quota_project_id)
return credentials
else:
raise EnvironmentError(
"client_options.credentials_file is only supported in google-auth.")
def default_credentials(scopes=None, quota_project_id=None):
"""Returns Application Default Credentials."""
if HAS_GOOGLE_AUTH:
credentials, _ = google.auth.default(scopes=scopes, quota_project_id=quota_project_id)
return credentials
elif HAS_OAUTH2CLIENT:
if scopes is not None or quota_project_id is not None:
raise EnvironmentError(
"client_options.scopes and client_options.quota_project_id are not supported in oauth2client. "
"Please install google-auth."
)
return oauth2client.client.GoogleCredentials.get_application_default()
else:
raise EnvironmentError(
"No authentication library is available. Please install either "
"google-auth or oauth2client."
)
def with_scopes(credentials, scopes):
"""Scopes the credentials if necessary.
Args:
credentials (Union[
google.auth.credentials.Credentials,
oauth2client.client.Credentials]): The credentials to scope.
scopes (Sequence[str]): The list of scopes.
Returns:
Union[google.auth.credentials.Credentials,
oauth2client.client.Credentials]: The scoped credentials.
"""
if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
return google.auth.credentials.with_scopes_if_required(credentials, scopes)
else:
try:
if credentials.create_scoped_required():
return credentials.create_scoped(scopes)
else:
return credentials
except AttributeError:
return credentials
def authorized_http(credentials):
"""Returns an http client that is authorized with the given credentials.
Args:
credentials (Union[
google.auth.credentials.Credentials,
oauth2client.client.Credentials]): The credentials to use.
Returns:
Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An
authorized http client.
"""
from googleapiclient.http import build_http
if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
if google_auth_httplib2 is None:
raise ValueError(
"Credentials from google.auth specified, but "
"google-api-python-client is unable to use these credentials "
"unless google-auth-httplib2 is installed. Please install "
"google-auth-httplib2."
)
return google_auth_httplib2.AuthorizedHttp(credentials, http=build_http())
else:
return credentials.authorize(build_http())
def refresh_credentials(credentials):
# Refresh must use a new http instance, as the one associated with the
# credentials could be an AuthorizedHttp or an oauth2client-decorated
# Http instance which would cause a weird recursive loop of refreshing
# and likely tear a hole in spacetime.
refresh_http = httplib2.Http()
if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
request = google_auth_httplib2.Request(refresh_http)
return credentials.refresh(request)
else:
return credentials.refresh(refresh_http)
def apply_credentials(credentials, headers):
# oauth2client and google-auth have the same interface for this.
if not is_valid(credentials):
refresh_credentials(credentials)
return credentials.apply(headers)
def is_valid(credentials):
if HAS_GOOGLE_AUTH and isinstance(credentials, google.auth.credentials.Credentials):
return credentials.valid
else:
return (
credentials.access_token is not None
and not credentials.access_token_expired
)
def get_credentials_from_http(http):
if http is None:
return None
elif hasattr(http.request, "credentials"):
return http.request.credentials
elif hasattr(http, "credentials") and not isinstance(
http.credentials, httplib2.Credentials
):
return http.credentials
else:
return None
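
For reference, a minimal sketch of how these helpers combine when google-auth and google-auth-httplib2 are installed; the scope and request URL are only examples:

# Illustrative only: obtain Application Default Credentials, scope them,
# and make an authorized request.
scopes = ["https://www.googleapis.com/auth/cloud-platform"]
credentials = default_credentials(scopes=scopes)
credentials = with_scopes(credentials, scopes)
http = authorized_http(credentials)
resp, content = http.request("https://example.googleapis.com/v1/resource")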


@@ -0,0 +1,211 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for commonly used utilities."""
import functools
import inspect
import logging
import warnings
import six
from six.moves import urllib
logger = logging.getLogger(__name__)
POSITIONAL_WARNING = "WARNING"
POSITIONAL_EXCEPTION = "EXCEPTION"
POSITIONAL_IGNORE = "IGNORE"
POSITIONAL_SET = frozenset(
[POSITIONAL_WARNING, POSITIONAL_EXCEPTION, POSITIONAL_IGNORE]
)
positional_parameters_enforcement = POSITIONAL_WARNING
_SYM_LINK_MESSAGE = "File: {0}: Is a symbolic link."
_IS_DIR_MESSAGE = "{0}: Is a directory"
_MISSING_FILE_MESSAGE = "Cannot access {0}: No such file or directory"
def positional(max_positional_args):
"""A decorator to declare that only the first N arguments may be positional.
This decorator makes it easy to support Python 3 style keyword-only
parameters. For example, in Python 3 it is possible to write::
def fn(pos1, *, kwonly1=None, kwonly2=None):
...
All named parameters after ``*`` must be a keyword::
fn(10, 'kw1', 'kw2') # Raises exception.
fn(10, kwonly1='kw1') # Ok.
Example
^^^^^^^
To define a function like above, do::
@positional(1)
def fn(pos1, kwonly1=None, kwonly2=None):
...
If no default value is provided to a keyword argument, it becomes a
required keyword argument::
@positional(0)
def fn(required_kw):
...
This must be called with the keyword parameter::
fn() # Raises exception.
fn(10) # Raises exception.
fn(required_kw=10) # Ok.
When defining instance or class methods always remember to account for
``self`` and ``cls``::
class MyClass(object):
@positional(2)
def my_method(self, pos1, kwonly1=None):
...
@classmethod
@positional(2)
def my_method(cls, pos1, kwonly1=None):
...
The positional decorator behavior is controlled by
``_helpers.positional_parameters_enforcement``, which may be set to
``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
nothing, respectively, if a declaration is violated.
Args:
max_positional_args: Maximum number of positional arguments. All
parameters after this index must be
keyword only.
Returns:
A decorator that prevents using arguments after max_positional_args
from being used as positional parameters.
Raises:
TypeError: if a keyword-only argument is provided as a positional
parameter, but only if
_helpers.positional_parameters_enforcement is set to
POSITIONAL_EXCEPTION.
"""
def positional_decorator(wrapped):
@functools.wraps(wrapped)
def positional_wrapper(*args, **kwargs):
if len(args) > max_positional_args:
plural_s = ""
if max_positional_args != 1:
plural_s = "s"
message = (
"{function}() takes at most {args_max} positional "
"argument{plural} ({args_given} given)".format(
function=wrapped.__name__,
args_max=max_positional_args,
args_given=len(args),
plural=plural_s,
)
)
if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
raise TypeError(message)
elif positional_parameters_enforcement == POSITIONAL_WARNING:
logger.warning(message)
return wrapped(*args, **kwargs)
return positional_wrapper
if isinstance(max_positional_args, six.integer_types):
return positional_decorator
else:
args, _, _, defaults = inspect.getargspec(max_positional_args)
return positional(len(args) - len(defaults))(max_positional_args)
def parse_unique_urlencoded(content):
"""Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated.
"""
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for key, value in six.iteritems(urlencoded_params):
if len(value) != 1:
msg = "URL-encoded content contains a repeated value: " "%s -> %s" % (
key,
", ".join(value),
)
raise ValueError(msg)
params[key] = value[0]
return params
def update_query_params(uri, params):
"""Updates a URI with new query parameters.
If a given key from ``params`` is repeated in the ``uri``, then
the URI will be considered invalid and an error will occur.
If the URI is valid, then each value from ``params`` will
replace the corresponding value in the query parameters (if
it exists).
Args:
uri: string, A valid URI, with potential existing query parameters.
params: dict, A dictionary of query parameters.
Returns:
The same URI but with the new query parameters added.
"""
parts = urllib.parse.urlparse(uri)
query_params = parse_unique_urlencoded(parts.query)
query_params.update(params)
new_query = urllib.parse.urlencode(query_params)
new_parts = parts._replace(query=new_query)
return urllib.parse.urlunparse(new_parts)
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
The updated url. Does not update the url if value is None.
"""
if value is None:
return url
else:
return update_query_params(url, {name: value})
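
A short sketch of the two helpers above in use; the function and URL below are illustrative only:

from googleapiclient import _helpers

@_helpers.positional(1)
def greet(name, greeting="Hello"):
    return "%s, %s" % (greeting, name)

greet("World")                 # OK: one positional argument.
greet("World", greeting="Hi")  # OK: remaining arguments passed by keyword.
# greet("World", "Hi")         # Warns or raises, depending on
#                              # positional_parameters_enforcement.

new_uri = _helpers.update_query_params("https://example.com/path?a=1", {"b": "2"})
# e.g. "https://example.com/path?a=1&b=2"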


@@ -0,0 +1,317 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Channel notifications support.
Classes and functions to support channel subscriptions and notifications
on those channels.
Notes:
- This code is based on experimental APIs and is subject to change.
- Notification does not do deduplication of notification ids, that's up to
the receiver.
- Storing the Channel between calls is up to the caller.
Example setting up a channel:
# Create a new channel that gets notifications via webhook.
channel = new_webhook_channel("https://example.com/my_web_hook")
# Store the channel, keyed by 'channel.id'. Store it before calling the
# watch method because notifications may start arriving before the watch
# method returns.
...
resp = service.objects().watchAll(
bucket="some_bucket_id", body=channel.body()).execute()
channel.update(resp)
# Store the channel, keyed by 'channel.id'. Store it after being updated
# since the resource_id value will now be correct, and that's needed to
# stop a subscription.
...
An example Webhook implementation using webapp2. Note that webapp2 puts
headers in a case insensitive dictionary, as headers aren't guaranteed to
always be upper case.
id = self.request.headers[X_GOOG_CHANNEL_ID]
# Retrieve the channel by id.
channel = ...
# Parse notification from the headers, including validating the id.
n = notification_from_headers(channel, self.request.headers)
# Do app specific stuff with the notification here.
if n.resource_state == 'sync':
# Code to handle sync state.
elif n.resource_state == 'exists':
# Code to handle the exists state.
elif n.resource_state == 'not_exists':
# Code to handle the not exists state.
Example of unsubscribing.
service.channels().stop(channel.body()).execute()
"""
from __future__ import absolute_import
import datetime
import uuid
from googleapiclient import errors
from googleapiclient import _helpers as util
import six
# The unix time epoch starts at midnight 1970.
EPOCH = datetime.datetime.utcfromtimestamp(0)
# Map the names of the parameters in the JSON channel description to
# the parameter names we use in the Channel class.
CHANNEL_PARAMS = {
"address": "address",
"id": "id",
"expiration": "expiration",
"params": "params",
"resourceId": "resource_id",
"resourceUri": "resource_uri",
"type": "type",
"token": "token",
}
X_GOOG_CHANNEL_ID = "X-GOOG-CHANNEL-ID"
X_GOOG_MESSAGE_NUMBER = "X-GOOG-MESSAGE-NUMBER"
X_GOOG_RESOURCE_STATE = "X-GOOG-RESOURCE-STATE"
X_GOOG_RESOURCE_URI = "X-GOOG-RESOURCE-URI"
X_GOOG_RESOURCE_ID = "X-GOOG-RESOURCE-ID"
def _upper_header_keys(headers):
new_headers = {}
for k, v in six.iteritems(headers):
new_headers[k.upper()] = v
return new_headers
class Notification(object):
"""A Notification from a Channel.
Notifications are not usually constructed directly, but are returned
from functions like notification_from_headers().
Attributes:
message_number: int, The unique id number of this notification.
state: str, The state of the resource being monitored.
uri: str, The address of the resource being monitored.
resource_id: str, The unique identifier of the version of the resource at
this event.
"""
@util.positional(5)
def __init__(self, message_number, state, resource_uri, resource_id):
"""Notification constructor.
Args:
message_number: int, The unique id number of this notification.
state: str, The state of the resource being monitored. Can be one
of "exists", "not_exists", or "sync".
resource_uri: str, The address of the resource being monitored.
resource_id: str, The identifier of the watched resource.
"""
self.message_number = message_number
self.state = state
self.resource_uri = resource_uri
self.resource_id = resource_id
class Channel(object):
"""A Channel for notifications.
Usually not constructed directly, instead it is returned from helper
functions like new_webhook_channel().
Attributes:
type: str, The type of delivery mechanism used by this channel. For
example, 'web_hook'.
id: str, A UUID for the channel.
token: str, An arbitrary string associated with the channel that
is delivered to the target address with each event delivered
over this channel.
address: str, The address of the receiving entity where events are
delivered. Specific to the channel type.
expiration: int, The time, in milliseconds from the epoch, when this
channel will expire.
params: dict, A dictionary of string to string, with additional parameters
controlling delivery channel behavior.
resource_id: str, An opaque id that identifies the resource that is
being watched. Stable across different API versions.
resource_uri: str, The canonicalized ID of the watched resource.
"""
@util.positional(5)
def __init__(
self,
type,
id,
token,
address,
expiration=None,
params=None,
resource_id="",
resource_uri="",
):
"""Create a new Channel.
In user code, this Channel constructor will not typically be called
manually since there are functions for creating channels for each specific
type with a more customized set of arguments to pass.
Args:
type: str, The type of delivery mechanism used by this channel. For
example, 'web_hook'.
id: str, A UUID for the channel.
token: str, An arbitrary string associated with the channel that
is delivered to the target address with each event delivered
over this channel.
address: str, The address of the receiving entity where events are
delivered. Specific to the channel type.
expiration: int, The time, in milliseconds from the epoch, when this
channel will expire.
params: dict, A dictionary of string to string, with additional parameters
controlling delivery channel behavior.
resource_id: str, An opaque id that identifies the resource that is
being watched. Stable across different API versions.
resource_uri: str, The canonicalized ID of the watched resource.
"""
self.type = type
self.id = id
self.token = token
self.address = address
self.expiration = expiration
self.params = params
self.resource_id = resource_id
self.resource_uri = resource_uri
def body(self):
"""Build a body from the Channel.
Constructs a dictionary that's appropriate for passing into watch()
methods as the value of body argument.
Returns:
A dictionary representation of the channel.
"""
result = {
"id": self.id,
"token": self.token,
"type": self.type,
"address": self.address,
}
if self.params:
result["params"] = self.params
if self.resource_id:
result["resourceId"] = self.resource_id
if self.resource_uri:
result["resourceUri"] = self.resource_uri
if self.expiration:
result["expiration"] = self.expiration
return result
def update(self, resp):
"""Update a channel with information from the response of watch().
When a request is sent to watch() a resource, the response returned
from the watch() request is a dictionary with updated channel information,
such as the resource_id, which is needed when stopping a subscription.
Args:
resp: dict, The response from a watch() method.
"""
for json_name, param_name in six.iteritems(CHANNEL_PARAMS):
value = resp.get(json_name)
if value is not None:
setattr(self, param_name, value)
def notification_from_headers(channel, headers):
"""Parse a notification from the webhook request headers, validate
the notification, and return a Notification object.
Args:
channel: Channel, The channel that the notification is associated with.
headers: dict, A dictionary like object that contains the request headers
from the webhook HTTP request.
Returns:
A Notification object.
Raises:
errors.InvalidNotificationError if the notification is invalid.
ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.
"""
headers = _upper_header_keys(headers)
channel_id = headers[X_GOOG_CHANNEL_ID]
if channel.id != channel_id:
raise errors.InvalidNotificationError(
"Channel id mismatch: %s != %s" % (channel.id, channel_id)
)
else:
message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
state = headers[X_GOOG_RESOURCE_STATE]
resource_uri = headers[X_GOOG_RESOURCE_URI]
resource_id = headers[X_GOOG_RESOURCE_ID]
return Notification(message_number, state, resource_uri, resource_id)
@util.positional(2)
def new_webhook_channel(url, token=None, expiration=None, params=None):
"""Create a new webhook Channel.
Args:
url: str, URL to post notifications to.
token: str, An arbitrary string associated with the channel that
is delivered to the target address with each notification delivered
over this channel.
expiration: datetime.datetime, A time in the future when the channel
should expire. Can also be None if the subscription should use the
default expiration. Note that different services may have different
limits on how long a subscription lasts. Check the response from the
watch() method to see the value the service has set for an expiration
time.
params: dict, Extra parameters to pass on channel creation. Currently
not used for webhook channels.
"""
expiration_ms = 0
if expiration:
delta = expiration - EPOCH
expiration_ms = (
delta.microseconds / 1000 + (delta.seconds + delta.days * 24 * 3600) * 1000
)
if expiration_ms < 0:
expiration_ms = 0
return Channel(
"web_hook",
str(uuid.uuid4()),
token,
url,
expiration=expiration_ms,
params=params,
)
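
A condensed sketch of the workflow from the module docstring; 'service' and the bucket name are placeholders for an already-built API client and watched resource:

import datetime

# Illustrative only.
channel = new_webhook_channel(
    "https://example.com/my_web_hook",
    token="opaque-app-token",
    expiration=datetime.datetime.utcnow() + datetime.timedelta(hours=1),
)
resp = service.objects().watchAll(
    bucket="some_bucket_id", body=channel.body()).execute()
channel.update(resp)  # Picks up resourceId, needed later to stop the channel.

# In the webhook handler, 'headers' is the incoming request's header dict:
# notification = notification_from_headers(channel, headers)
# To unsubscribe:
# service.channels().stop(channel.body()).execute()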

File diff suppressed because it is too large


@@ -0,0 +1,49 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Caching utility for the discovery document."""

from __future__ import absolute_import

import logging
import datetime
import os

LOGGER = logging.getLogger(__name__)

DISCOVERY_DOC_MAX_AGE = 60 * 60 * 24  # 1 day


def autodetect():
    """Detects an appropriate cache module and returns it.

    Returns:
        googleapiclient.discovery_cache.base.Cache, a cache object which
        is auto detected, or None if no cache object is available.
    """
    if 'APPENGINE_RUNTIME' in os.environ:
        try:
            from google.appengine.api import memcache
            from . import appengine_memcache
            return appengine_memcache.cache
        except Exception:
            pass
    try:
        from . import file_cache
        return file_cache.cache
    except Exception as e:
        LOGGER.warning(e, exc_info=True)
        return None
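
A sketch of how a caller might use the detected cache when building a service; the API name is only an example, and passing the cache through relies on the cache/cache_discovery parameters that discovery.build() accepts:

from googleapiclient import discovery
from googleapiclient.discovery_cache import autodetect

cache = autodetect()  # file_cache normally, memcache on App Engine, or None.
service = discovery.build(
    "drive", "v3", cache_discovery=cache is not None, cache=cache
)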


@@ -0,0 +1,56 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine memcache based cache for the discovery document."""

import logging

# This is only an optional dependency because we only import this
# module when google.appengine.api.memcache is available.
from google.appengine.api import memcache

from . import base
from ..discovery_cache import DISCOVERY_DOC_MAX_AGE

LOGGER = logging.getLogger(__name__)

NAMESPACE = "google-api-client"


class Cache(base.Cache):
    """A cache with app engine memcache API."""

    def __init__(self, max_age):
        """Constructor.

        Args:
            max_age: Cache expiration in seconds.
        """
        self._max_age = max_age

    def get(self, url):
        try:
            return memcache.get(url, namespace=NAMESPACE)
        except Exception as e:
            LOGGER.warning(e, exc_info=True)

    def set(self, url, content):
        try:
            memcache.set(url, content, time=int(self._max_age), namespace=NAMESPACE)
        except Exception as e:
            LOGGER.warning(e, exc_info=True)


cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)


@@ -0,0 +1,46 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract class for caching the discovery document."""

import abc


class Cache(object):
    """A base abstract cache class."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get(self, url):
        """Gets the content from the memcache with a given key.

        Args:
            url: string, the key for the cache.

        Returns:
            object, the value in the cache for the given key, or None if the key is
            not in the cache.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def set(self, url, content):
        """Sets the given key and content in the cache.

        Args:
            url: string, the key for the cache.
            content: string, the discovery document.
        """
        raise NotImplementedError()
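
A minimal in-memory implementation of this interface, e.g. for tests; handing it to discovery.build(cache=...) is an assumption based on the cache parameter that build() exposes:

from googleapiclient.discovery_cache import base

class DictCache(base.Cache):
    """Toy cache that keeps discovery documents in a plain dict."""

    def __init__(self):
        self._store = {}

    def get(self, url):
        return self._store.get(url)

    def set(self, url, content):
        self._store[url] = content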


@@ -0,0 +1,146 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File based cache for the discovery document.
The cache is stored in a single file so that multiple processes can
share the same cache. It locks the file whenever accessing the
file. When the cache content is corrupted, it will be initialized with
an empty cache.
"""
from __future__ import division
import datetime
import json
import logging
import os
import tempfile
import threading
try:
from oauth2client.contrib.locked_file import LockedFile
except ImportError:
# oauth2client < 2.0.0
try:
from oauth2client.locked_file import LockedFile
except ImportError:
# oauth2client > 4.0.0 or google-auth
raise ImportError(
"file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth"
)
from . import base
from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
LOGGER = logging.getLogger(__name__)
FILENAME = "google-api-python-client-discovery-doc.cache"
EPOCH = datetime.datetime.utcfromtimestamp(0)
def _to_timestamp(date):
try:
return (date - EPOCH).total_seconds()
except AttributeError:
# The following is the equivalent of total_seconds() in Python2.6.
# See also: https://docs.python.org/2/library/datetime.html
delta = date - EPOCH
return (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
) / 10 ** 6
def _read_or_initialize_cache(f):
f.file_handle().seek(0)
try:
cache = json.load(f.file_handle())
except Exception:
# This means the file is being opened for the first time, or the cache
# is corrupted, so initialize the file with an empty dict.
cache = {}
f.file_handle().truncate(0)
f.file_handle().seek(0)
json.dump(cache, f.file_handle())
return cache
class Cache(base.Cache):
"""A file based cache for the discovery documents."""
def __init__(self, max_age):
"""Constructor.
Args:
max_age: Cache expiration in seconds.
"""
self._max_age = max_age
self._file = os.path.join(tempfile.gettempdir(), FILENAME)
f = LockedFile(self._file, "a+", "r")
try:
f.open_and_lock()
if f.is_locked():
_read_or_initialize_cache(f)
# If we cannot obtain the lock, another process or thread must
# have initialized the file.
except Exception as e:
LOGGER.warning(e, exc_info=True)
finally:
f.unlock_and_close()
def get(self, url):
f = LockedFile(self._file, "r+", "r")
try:
f.open_and_lock()
if f.is_locked():
cache = _read_or_initialize_cache(f)
if url in cache:
content, t = cache.get(url, (None, 0))
if _to_timestamp(datetime.datetime.now()) < t + self._max_age:
return content
return None
else:
LOGGER.debug("Could not obtain a lock for the cache file.")
return None
except Exception as e:
LOGGER.warning(e, exc_info=True)
finally:
f.unlock_and_close()
def set(self, url, content):
f = LockedFile(self._file, "r+", "r")
try:
f.open_and_lock()
if f.is_locked():
cache = _read_or_initialize_cache(f)
cache[url] = (content, _to_timestamp(datetime.datetime.now()))
# Remove stale cache.
for k, (_, timestamp) in list(cache.items()):
if (
_to_timestamp(datetime.datetime.now())
>= timestamp + self._max_age
):
del cache[k]
f.file_handle().truncate(0)
f.file_handle().seek(0)
json.dump(cache, f.file_handle())
else:
LOGGER.debug("Could not obtain a lock for the cache file.")
except Exception as e:
LOGGER.warning(e, exc_info=True)
finally:
f.unlock_and_close()
cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
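
The module-level singleton can be exercised directly; the URL and payload here are made up:

# Illustrative only.
doc_url = "https://example.googleapis.com/discovery/v1/apis/foo/v1/rest"
cache.set(doc_url, '{"kind": "discovery#restDescription"}')
print(cache.get(doc_url))  # Returns the cached document until it expires.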


@@ -0,0 +1,180 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
from __future__ import absolute_import
__author__ = "jcgregorio@google.com (Joe Gregorio)"
import json
from googleapiclient import _helpers as util
class Error(Exception):
"""Base error for this module."""
pass
class HttpError(Error):
"""HTTP data was invalid or unexpected."""
@util.positional(3)
def __init__(self, resp, content, uri=None):
self.resp = resp
if not isinstance(content, bytes):
raise TypeError("HTTP content should be bytes")
self.content = content
self.uri = uri
self.error_details = ""
def _get_reason(self):
"""Calculate the reason for the error from the response content."""
reason = self.resp.reason
try:
data = json.loads(self.content.decode("utf-8"))
if isinstance(data, dict):
reason = data["error"]["message"]
if "details" in data["error"]:
self.error_details = data["error"]["details"]
elif "detail" in data["error"]:
self.error_details = data["error"]["detail"]
elif isinstance(data, list) and len(data) > 0:
first_error = data[0]
reason = first_error["error"]["message"]
if "details" in first_error["error"]:
self.error_details = first_error["error"]["details"]
except (ValueError, KeyError, TypeError):
pass
if reason is None:
reason = ""
return reason
def __repr__(self):
reason = self._get_reason()
if self.error_details:
return '<HttpError %s when requesting %s returned "%s". Details: "%s">' % (
self.resp.status,
self.uri,
reason.strip(),
self.error_details,
)
elif self.uri:
return '<HttpError %s when requesting %s returned "%s">' % (
self.resp.status,
self.uri,
self._get_reason().strip(),
)
else:
return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
__str__ = __repr__
class InvalidJsonError(Error):
"""The JSON returned could not be parsed."""
pass
class UnknownFileType(Error):
"""File type unknown or unexpected."""
pass
class UnknownLinkType(Error):
"""Link type unknown or unexpected."""
pass
class UnknownApiNameOrVersion(Error):
"""No API with that name and version exists."""
pass
class UnacceptableMimeTypeError(Error):
"""That is an unacceptable mimetype for this operation."""
pass
class MediaUploadSizeError(Error):
"""Media is larger than the method can accept."""
pass
class ResumableUploadError(HttpError):
"""Error occurred during resumable upload."""
pass
class InvalidChunkSizeError(Error):
"""The given chunksize is not valid."""
pass
class InvalidNotificationError(Error):
"""The channel Notification is invalid."""
pass
class BatchError(HttpError):
"""Error occurred during batch operations."""
@util.positional(2)
def __init__(self, reason, resp=None, content=None):
self.resp = resp
self.content = content
self.reason = reason
def __repr__(self):
if getattr(self.resp, "status", None) is None:
return '<BatchError "%s">' % (self.reason)
else:
return '<BatchError %s "%s">' % (self.resp.status, self.reason)
__str__ = __repr__
class UnexpectedMethodError(Error):
"""Exception raised by RequestMockBuilder on unexpected calls."""
@util.positional(1)
def __init__(self, methodId=None):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedMethodError, self).__init__(
"Received unexpected call %s" % methodId
)
class UnexpectedBodyError(Error):
"""Exception raised by RequestMockBuilder on unexpected bodies."""
def __init__(self, expected, provided):
"""Constructor for an UnexpectedBodyError."""
super(UnexpectedBodyError, self).__init__(
"Expected: [%s] - Provided: [%s]" % (expected, provided)
)
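
A sketch of how these exceptions typically surface to callers; 'service' stands in for a built API client and the method call is only an example:

from googleapiclient.errors import HttpError

try:
    result = service.files().get(fileId="does-not-exist").execute()
except HttpError as err:
    # resp carries the HTTP status; the message includes the parsed reason.
    print("API call failed with status %s: %s" % (err.resp.status, err))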

File diff suppressed because it is too large


@@ -0,0 +1,183 @@
# Copyright 2014 Joe Gregorio
#
# Licensed under the MIT License
"""MIME-Type Parser
This module provides basic functions for handling mime-types. It can handle
matching mime-types against a list of media-ranges. See section 14.1 of the
HTTP specification [RFC 2616] for a complete explanation.
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
Contents:
- parse_mime_type(): Parses a mime-type into its component parts.
- parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
quality parameter.
- quality(): Determines the quality ('q') of a mime-type when
compared against a list of media-ranges.
- quality_parsed(): Just like quality() except the second parameter must be
pre-parsed.
- best_match(): Choose the mime-type with the highest quality ('q')
from a list of candidates.
"""
from __future__ import absolute_import
from functools import reduce
import six
__version__ = "0.1.3"
__author__ = "Joe Gregorio"
__email__ = "joe@bitworking.org"
__license__ = "MIT License"
__credits__ = ""
def parse_mime_type(mime_type):
"""Parses a mime-type into its component parts.
Carves up a mime-type and returns a tuple of the (type, subtype, params)
where 'params' is a dictionary of all the parameters for the media range.
For example, the media range 'application/xhtml;q=0.5' would get parsed
into:
('application', 'xhtml', {'q': '0.5'})
"""
parts = mime_type.split(";")
params = dict(
[tuple([s.strip() for s in param.split("=", 1)]) for param in parts[1:]]
)
full_type = parts[0].strip()
# Java URLConnection class sends an Accept header that includes a
# single '*'. Turn it into a legal wildcard.
if full_type == "*":
full_type = "*/*"
(type, subtype) = full_type.split("/")
return (type.strip(), subtype.strip(), params)
def parse_media_range(range):
"""Parse a media-range into its component parts.
Carves up a media range and returns a tuple of the (type, subtype,
params) where 'params' is a dictionary of all the parameters for the media
range. For example, the media range 'application/*;q=0.5' would get parsed
into:
('application', '*', {'q': '0.5'})
In addition this function also guarantees that there is a value for 'q'
in the params dictionary, filling it in with a proper default if
necessary.
"""
(type, subtype, params) = parse_mime_type(range)
if (
"q" not in params
or not params["q"]
or not float(params["q"])
or float(params["q"]) > 1
or float(params["q"]) < 0
):
params["q"] = "1"
return (type, subtype, params)
def fitness_and_quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a mime-type amongst parsed media-ranges.
Find the best match for a given mime-type against a list of media_ranges
that have already been parsed by parse_media_range(). Returns a tuple of
the fitness value and the value of the 'q' quality parameter of the best
match, or (-1, 0) if no match was found. Just as for quality_parsed(),
'parsed_ranges' must be a list of parsed media ranges.
"""
best_fitness = -1
best_fit_q = 0
(target_type, target_subtype, target_params) = parse_media_range(mime_type)
for (type, subtype, params) in parsed_ranges:
type_match = type == target_type or type == "*" or target_type == "*"
subtype_match = (
subtype == target_subtype or subtype == "*" or target_subtype == "*"
)
if type_match and subtype_match:
param_matches = reduce(
lambda x, y: x + y,
[
1
for (key, value) in six.iteritems(target_params)
if key != "q" and key in params and value == params[key]
],
0,
)
fitness = (type == target_type) and 100 or 0
fitness += (subtype == target_subtype) and 10 or 0
fitness += param_matches
if fitness > best_fitness:
best_fitness = fitness
best_fit_q = params["q"]
return best_fitness, float(best_fit_q)
def quality_parsed(mime_type, parsed_ranges):
"""Find the best match for a mime-type amongst parsed media-ranges.
Find the best match for a given mime-type against a list of media_ranges
that have already been parsed by parse_media_range(). Returns the 'q'
quality parameter of the best match, 0 if no match was found. This function
behaves the same as quality() except that 'parsed_ranges' must be a list of
parsed media ranges.
"""
return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
def quality(mime_type, ranges):
"""Return the quality ('q') of a mime-type against a list of media-ranges.
Returns the quality 'q' of a mime-type when compared against the
media-ranges in ranges. For example:
>>> quality('text/html','text/*;q=0.3, text/html;q=0.7,
text/html;level=1, text/html;level=2;q=0.4, */*;q=0.5')
0.7
"""
parsed_ranges = [parse_media_range(r) for r in ranges.split(",")]
return quality_parsed(mime_type, parsed_ranges)
def best_match(supported, header):
"""Return mime-type with the highest quality ('q') from list of candidates.
Takes a list of supported mime-types and finds the best match for all the
media-ranges listed in header. The value of header must be a string that
conforms to the format of the HTTP Accept: header. The value of 'supported'
is a list of mime-types. The list of supported mime-types should be sorted
in order of increasing desirability, in case of a situation where there is
a tie.
>>> best_match(['application/xbel+xml', 'text/xml'],
'text/*;q=0.5,*/*; q=0.1')
'text/xml'
"""
split_header = _filter_blank(header.split(","))
parsed_header = [parse_media_range(r) for r in split_header]
weighted_matches = []
pos = 0
for mime_type in supported:
weighted_matches.append(
(fitness_and_quality_parsed(mime_type, parsed_header), pos, mime_type)
)
pos += 1
weighted_matches.sort()
return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ""
def _filter_blank(i):
for s in i:
if s.strip():
yield s
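
Two quick doctest-style examples of the main entry points:

>>> quality("text/html", "text/*;q=0.3, text/html;q=0.7, */*;q=0.5")
0.7
>>> best_match(["application/json", "text/html"], "text/*;q=0.5, */*;q=0.1")
'text/html'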


@@ -0,0 +1,407 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
from __future__ import absolute_import
import six
__author__ = "jcgregorio@google.com (Joe Gregorio)"
import json
import logging
import platform
import pkg_resources
from six.moves.urllib.parse import urlencode
from googleapiclient.errors import HttpError
_LIBRARY_VERSION = pkg_resources.get_distribution("google-api-python-client").version
_PY_VERSION = platform.python_version()
LOGGER = logging.getLogger(__name__)
dump_request_response = False
def _abstract():
raise NotImplementedError("You need to override this function")
class Model(object):
"""Model base class.
All Model classes should implement this interface.
The Model serializes and de-serializes between a wire
format such as JSON and a Python object representation.
"""
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized in the desired wire format.
"""
_abstract()
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
googleapiclient.errors.HttpError if a non 2xx response is received.
"""
_abstract()
class BaseModel(Model):
"""Base model class.
Subclasses should provide implementations for the "serialize" and
"deserialize" methods, as well as values for the following class attributes.
Attributes:
accept: The value to use for the HTTP Accept header.
content_type: The value to use for the HTTP Content-type header.
no_content_response: The value to return when deserializing a 204 "No
Content" response.
alt_param: The value to supply as the "alt" query parameter for requests.
"""
accept = None
content_type = None
no_content_response = None
alt_param = None
def _log_request(self, headers, path_params, query, body):
"""Logs debugging information about the request if requested."""
if dump_request_response:
LOGGER.info("--request-start--")
LOGGER.info("-headers-start-")
for h, v in six.iteritems(headers):
LOGGER.info("%s: %s", h, v)
LOGGER.info("-headers-end-")
LOGGER.info("-path-parameters-start-")
for h, v in six.iteritems(path_params):
LOGGER.info("%s: %s", h, v)
LOGGER.info("-path-parameters-end-")
LOGGER.info("body: %s", body)
LOGGER.info("query: %s", query)
LOGGER.info("--request-end--")
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a serialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by json.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
query = self._build_query(query_params)
headers["accept"] = self.accept
headers["accept-encoding"] = "gzip, deflate"
if "user-agent" in headers:
headers["user-agent"] += " "
else:
headers["user-agent"] = ""
headers["user-agent"] += "(gzip)"
if "x-goog-api-client" in headers:
headers["x-goog-api-client"] += " "
else:
headers["x-goog-api-client"] = ""
headers["x-goog-api-client"] += "gdcl/%s gl-python/%s" % (
_LIBRARY_VERSION,
_PY_VERSION,
)
if body_value is not None:
headers["content-type"] = self.content_type
body_value = self.serialize(body_value)
self._log_request(headers, path_params, query, body_value)
return (headers, path_params, query, body_value)
def _build_query(self, params):
"""Builds a query string.
Args:
params: dict, the query parameters
Returns:
The query parameters properly encoded into an HTTP URI query string.
"""
if self.alt_param is not None:
params.update({"alt": self.alt_param})
astuples = []
for key, value in six.iteritems(params):
if type(value) == type([]):
for x in value:
x = x.encode("utf-8")
astuples.append((key, x))
else:
if isinstance(value, six.text_type) and callable(value.encode):
value = value.encode("utf-8")
astuples.append((key, value))
return "?" + urlencode(astuples)
def _log_response(self, resp, content):
"""Logs debugging information about the response if requested."""
if dump_request_response:
LOGGER.info("--response-start--")
for h, v in six.iteritems(resp):
LOGGER.info("%s: %s", h, v)
if content:
LOGGER.info(content)
LOGGER.info("--response-end--")
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
googleapiclient.errors.HttpError if a non 2xx response is received.
"""
self._log_response(resp, content)
# Error handling is TBD, for example, do we retry
# for some operation/error combinations?
if resp.status < 300:
if resp.status == 204:
# A 204: No Content response should be treated differently
# to all the other success states
return self.no_content_response
return self.deserialize(content)
else:
LOGGER.debug("Content from bad request was: %r" % content)
raise HttpError(resp, content)
def serialize(self, body_value):
"""Perform the actual Python object serialization.
Args:
body_value: object, the request body as a Python object.
Returns:
string, the body in serialized form.
"""
_abstract()
def deserialize(self, content):
"""Perform the actual deserialization from response string to Python
object.
Args:
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
"""
_abstract()
class JsonModel(BaseModel):
"""Model class for JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request and response bodies.
"""
accept = "application/json"
content_type = "application/json"
alt_param = "json"
def __init__(self, data_wrapper=False):
"""Construct a JsonModel.
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper
def serialize(self, body_value):
if (
isinstance(body_value, dict)
and "data" not in body_value
and self._data_wrapper
):
body_value = {"data": body_value}
return json.dumps(body_value)
def deserialize(self, content):
try:
content = content.decode("utf-8")
except AttributeError:
pass
body = json.loads(content)
if self._data_wrapper and isinstance(body, dict) and "data" in body:
body = body["data"]
return body
@property
def no_content_response(self):
return {}
class RawModel(JsonModel):
"""Model class for requests that don't return JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = "*/*"
content_type = "application/json"
alt_param = None
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ""
class MediaModel(JsonModel):
"""Model class for requests that return Media.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request, and returns the raw bytes
of the response body.
"""
accept = "*/*"
content_type = "application/json"
alt_param = "media"
def deserialize(self, content):
return content
@property
def no_content_response(self):
return ""
class ProtocolBufferModel(BaseModel):
"""Model class for protocol buffers.
Serializes and de-serializes the binary protocol buffer sent in the HTTP
request and response bodies.
"""
accept = "application/x-protobuf"
content_type = "application/x-protobuf"
alt_param = "proto"
def __init__(self, protocol_buffer):
"""Constructs a ProtocolBufferModel.
The serialized protocol buffer returned in an HTTP response will be
de-serialized using the given protocol buffer class.
Args:
protocol_buffer: The protocol buffer class used to de-serialize a
response from the API.
"""
self._protocol_buffer = protocol_buffer
def serialize(self, body_value):
return body_value.SerializeToString()
def deserialize(self, content):
return self._protocol_buffer.FromString(content)
@property
def no_content_response(self):
return self._protocol_buffer()
def makepatch(original, modified):
"""Create a patch object.
Some methods support PATCH, an efficient way to send updates to a resource.
This method allows the easy construction of patch bodies by looking at the
differences between a resource before and after it was modified.
Args:
original: object, the original deserialized resource
modified: object, the modified deserialized resource
Returns:
An object that contains only the changes from original to modified, in a
form suitable to pass to a PATCH method.
Example usage:
item = service.activities().get(postid=postid, userid=userid).execute()
original = copy.deepcopy(item)
item['object']['content'] = 'This is updated.'
service.activities.patch(postid=postid, userid=userid,
body=makepatch(original, item)).execute()
"""
patch = {}
for key, original_value in six.iteritems(original):
modified_value = modified.get(key, None)
if modified_value is None:
# Use None to signal that the element is deleted
patch[key] = None
elif original_value != modified_value:
if type(original_value) == type({}):
# Recursively descend objects
patch[key] = makepatch(original_value, modified_value)
else:
# In the case of simple types or arrays we just replace
patch[key] = modified_value
else:
# Don't add anything to patch if there's no change
pass
for key in modified:
if key not in original:
patch[key] = modified[key]
return patch
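
A small sketch of the JSON model round trip in isolation; the payloads are illustrative:

model = JsonModel(data_wrapper=False)
body = model.serialize({"title": "My Document"})
# body == '{"title": "My Document"}'

plain = model.deserialize('{"data": {"id": "123"}}')
# plain == {'data': {'id': '123'}}  (no wrapper stripping)

wrapped = JsonModel(data_wrapper=True).deserialize('{"data": {"id": "123"}}')
# wrapped == {'id': '123'}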


@@ -0,0 +1,110 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for making samples.
Consolidates a lot of code commonly repeated in sample applications.
"""
from __future__ import absolute_import
__author__ = "jcgregorio@google.com (Joe Gregorio)"
__all__ = ["init"]
import argparse
import os
from googleapiclient import discovery
from googleapiclient.http import build_http
def init(
argv, name, version, doc, filename, scope=None, parents=[], discovery_filename=None
):
"""A common initialization routine for samples.
Many of the sample applications do the same initialization, which has now
been consolidated into this function. This function uses common idioms found
in almost all the samples, i.e. for an API with name 'apiname', the
credentials are stored in a file named apiname.dat, and the
client_secrets.json file is stored in the same directory as the application
main file.
Args:
argv: list of string, the command-line parameters of the application.
name: string, name of the API.
version: string, version of the API.
doc: string, description of the application. Usually set to __doc__.
filename: string, filename of the application. Usually set to __file__.
parents: list of argparse.ArgumentParser, additional command-line flags.
scope: string, The OAuth scope used.
discovery_filename: string, name of local discovery file (JSON). Use when discovery doc not available via URL.
Returns:
A tuple of (service, flags), where service is the service object and flags
is the parsed command-line flags.
"""
try:
from oauth2client import client
from oauth2client import file
from oauth2client import tools
except ImportError:
raise ImportError(
"googleapiclient.sample_tools requires oauth2client. Please install oauth2client and try again."
)
if scope is None:
scope = "https://www.googleapis.com/auth/" + name
# Parse command-line arguments.
parent_parsers = [tools.argparser]
parent_parsers.extend(parents)
parser = argparse.ArgumentParser(
description=doc,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=parent_parsers,
)
flags = parser.parse_args(argv[1:])
# Name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>.
client_secrets = os.path.join(os.path.dirname(filename), "client_secrets.json")
# Set up a Flow object to be used if we need to authenticate.
flow = client.flow_from_clientsecrets(
client_secrets, scope=scope, message=tools.message_if_missing(client_secrets)
)
# Prepare credentials, and authorize HTTP object with them.
# If the credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# credentials will get written back to a file.
storage = file.Storage(name + ".dat")
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, storage, flags)
http = credentials.authorize(http=build_http())
if discovery_filename is None:
# Construct a service object via the discovery service.
service = discovery.build(name, version, http=http)
else:
# Construct a service object using a local discovery document file.
with open(discovery_filename) as discovery_file:
service = discovery.build_from_document(
discovery_file.read(), base="https://www.googleapis.com/", http=http
)
return (service, flags)
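
A sketch of the boilerplate a sample script would wrap around init(); the API name, scope, and call are placeholders, and oauth2client must be installed:

import sys

from googleapiclient import sample_tools


def main(argv):
    service, flags = sample_tools.init(
        argv, "drive", "v3", __doc__, __file__,
        scope="https://www.googleapis.com/auth/drive.metadata.readonly",
    )
    about = service.about().get(fields="user").execute()
    print(about)


if __name__ == "__main__":
    main(sys.argv)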


@@ -0,0 +1,315 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an API's discovery schemas. It can return those schemas as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
from __future__ import absolute_import
import six
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = "jcgregorio@google.com (Joe Gregorio)"
import copy
from googleapiclient import _helpers as util
class Schemas(object):
"""Schemas for an API."""
def __init__(self, discovery):
"""Constructor.
Args:
discovery: object, Deserialized discovery document from which we pull
out the named schema.
"""
self.schemas = discovery.get("schemas", {})
# Cache of pretty printed schemas.
self.pretty = {}
@util.positional(2)
def _prettyPrintByName(self, name, seen=None, dent=0):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
if name in seen:
# Do not fall into an infinite loop over recursive definitions.
return "# Object with schema name: %s" % name
seen.append(name)
if name not in self.pretty:
self.pretty[name] = _SchemaToStruct(
self.schemas[name], seen, dent=dent
).to_str(self._prettyPrintByName)
seen.pop()
return self.pretty[name]
def prettyPrintByName(self, name):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
@util.positional(2)
def _prettyPrintSchema(self, schema, seen=None, dent=0):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
def prettyPrintSchema(self, schema):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintSchema(schema, dent=1)[:-2]
def get(self, name, default=None):
"""Get deserialized JSON schema from the schema name.
Args:
name: string, Schema name.
default: object, return value if name not found.
"""
return self.schemas.get(name, default)
class _SchemaToStruct(object):
"""Convert schema to a prototype object."""
@util.positional(3)
def __init__(self, schema, seen, dent=0):
"""Constructor.
Args:
schema: object, Parsed JSON schema.
seen: list, List of names of schema already seen while parsing. Used to
handle recursive definitions.
dent: int, Initial indentation depth.
"""
# The result of this parsing kept as list of strings.
self.value = []
# The final value of the parsing.
self.string = None
# The parsed JSON schema.
self.schema = schema
# Indentation level.
self.dent = dent
# Method that when called returns a prototype object for the schema with
# the given name.
self.from_cache = None
# List of names of schema already seen while parsing.
self.seen = seen
def emit(self, text):
"""Add text as a line to the output.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text, "\n"])
def emitBegin(self, text):
"""Add text to the output, but with no line terminator.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text])
def emitEnd(self, text, comment):
"""Add text and comment to the output with line terminator.
Args:
text: string, Text to output.
comment: string, Python comment.
"""
if comment:
divider = "\n" + " " * (self.dent + 2) + "# "
lines = comment.splitlines()
lines = [x.rstrip() for x in lines]
comment = divider.join(lines)
self.value.extend([text, " # ", comment, "\n"])
else:
self.value.extend([text, "\n"])
def indent(self):
"""Increase indentation level."""
self.dent += 1
def undent(self):
"""Decrease indentation level."""
self.dent -= 1
def _to_str_impl(self, schema):
"""Prototype object based on the schema, in Python code with comments.
Args:
schema: object, Parsed JSON schema file.
Returns:
Prototype object based on the schema, in Python code with comments.
"""
stype = schema.get("type")
if stype == "object":
self.emitEnd("{", schema.get("description", ""))
self.indent()
if "properties" in schema:
for pname, pschema in six.iteritems(schema.get("properties", {})):
self.emitBegin('"%s": ' % pname)
self._to_str_impl(pschema)
elif "additionalProperties" in schema:
self.emitBegin('"a_key": ')
self._to_str_impl(schema["additionalProperties"])
self.undent()
self.emit("},")
elif "$ref" in schema:
schemaName = schema["$ref"]
description = schema.get("description", "")
s = self.from_cache(schemaName, seen=self.seen)
parts = s.splitlines()
self.emitEnd(parts[0], description)
for line in parts[1:]:
self.emit(line.rstrip())
elif stype == "boolean":
value = schema.get("default", "True or False")
self.emitEnd("%s," % str(value), schema.get("description", ""))
elif stype == "string":
value = schema.get("default", "A String")
self.emitEnd('"%s",' % str(value), schema.get("description", ""))
elif stype == "integer":
value = schema.get("default", "42")
self.emitEnd("%s," % str(value), schema.get("description", ""))
elif stype == "number":
value = schema.get("default", "3.14")
self.emitEnd("%s," % str(value), schema.get("description", ""))
elif stype == "null":
self.emitEnd("None,", schema.get("description", ""))
elif stype == "any":
self.emitEnd('"",', schema.get("description", ""))
elif stype == "array":
self.emitEnd("[", schema.get("description"))
self.indent()
self.emitBegin("")
self._to_str_impl(schema["items"])
self.undent()
self.emit("],")
else:
self.emit("Unknown type! %s" % stype)
self.emitEnd("", "")
self.string = "".join(self.value)
return self.string
def to_str(self, from_cache):
"""Prototype object based on the schema, in Python code with comments.
Args:
from_cache: callable(name, seen), Callable that retrieves an object
prototype for a schema with the given name. Seen is a list of schema
names already seen as we recursively descend the schema definition.
Returns:
Prototype object based on the schema, in Python code with comments.
The lines of the code will all be properly indented.
"""
self.from_cache = from_cache
return self._to_str_impl(self.schema)
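
A tiny sketch of the pretty-printing path using an inline discovery fragment:

discovery_doc = {
    "schemas": {
        "Foo": {
            "type": "object",
            "properties": {
                "etag": {"type": "string", "description": "ETag of the collection."}
            },
        }
    }
}
s = Schemas(discovery_doc)
print(s.prettyPrintByName("Foo"))  # Prints a commented prototype object for Foo.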