Added delete option to database storage.

This commit is contained in:
Batuhan Berk Başoğlu 2020-10-12 12:10:01 -04:00
parent 308604a33c
commit 963b5bc68b
1868 changed files with 192402 additions and 13278 deletions

View file

@@ -0,0 +1,73 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python idiomatic client for Google Cloud Firestore."""
from pkg_resources import get_distribution
import warnings
__version__ = get_distribution("google-cloud-firestore").version
from google.cloud.firestore_v1beta1 import types
from google.cloud.firestore_v1beta1._helpers import GeoPoint
from google.cloud.firestore_v1beta1._helpers import ExistsOption
from google.cloud.firestore_v1beta1._helpers import LastUpdateOption
from google.cloud.firestore_v1beta1._helpers import ReadAfterWriteError
from google.cloud.firestore_v1beta1._helpers import WriteOption
from google.cloud.firestore_v1beta1.batch import WriteBatch
from google.cloud.firestore_v1beta1.client import Client
from google.cloud.firestore_v1beta1.collection import CollectionReference
from google.cloud.firestore_v1beta1.transforms import ArrayRemove
from google.cloud.firestore_v1beta1.transforms import ArrayUnion
from google.cloud.firestore_v1beta1.transforms import DELETE_FIELD
from google.cloud.firestore_v1beta1.transforms import SERVER_TIMESTAMP
from google.cloud.firestore_v1beta1.document import DocumentReference
from google.cloud.firestore_v1beta1.document import DocumentSnapshot
from google.cloud.firestore_v1beta1.gapic import enums
from google.cloud.firestore_v1beta1.query import Query
from google.cloud.firestore_v1beta1.transaction import Transaction
from google.cloud.firestore_v1beta1.transaction import transactional
from google.cloud.firestore_v1beta1.watch import Watch
# Emit a DeprecationWarning at import time: the v1beta1 surface is superseded.
_V1BETA1_DEPRECATED_MESSAGE = (
    "The 'v1beta1' API endpoint is deprecated. "
    "The client/library which supports it will be removed in a future release."
)
warnings.warn(_V1BETA1_DEPRECATED_MESSAGE, DeprecationWarning)
# Public API of this package: names re-exported from the submodule imports above.
__all__ = [
    "__version__",
    "ArrayRemove",
    "ArrayUnion",
    "Client",
    "CollectionReference",
    "DELETE_FIELD",
    "DocumentReference",
    "DocumentSnapshot",
    "enums",
    "ExistsOption",
    "GeoPoint",
    "LastUpdateOption",
    "Query",
    "ReadAfterWriteError",
    "SERVER_TIMESTAMP",
    "Transaction",
    "transactional",
    "types",
    "Watch",
    "WriteBatch",
    "WriteOption",
]

View file

@@ -0,0 +1,998 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common helpers shared across Google Cloud Firestore modules."""
import datetime
from google.protobuf import struct_pb2
from google.type import latlng_pb2
import grpc
import six
from google.cloud import exceptions
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.cloud.firestore_v1beta1 import transforms
from google.cloud.firestore_v1beta1 import types
from google.cloud.firestore_v1beta1.field_path import FieldPath
from google.cloud.firestore_v1beta1.field_path import parse_field_path
from google.cloud.firestore_v1beta1.gapic import enums
from google.cloud.firestore_v1beta1.proto import common_pb2
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import write_pb2
# Error-message templates and path constants shared across the helpers below.
BAD_PATH_TEMPLATE = "A path element must be a string. Received {}, which is a {}."
DOCUMENT_PATH_DELIMITER = "/"
INACTIVE_TXN = "Transaction not in progress, cannot be used in API requests."
READ_AFTER_WRITE_ERROR = "Attempted read after write in a transaction."
BAD_REFERENCE_ERROR = (
    "Reference value {!r} in unexpected format, expected to be of the form "
    "``projects/{{project}}/databases/{{database}}/"
    "documents/{{document_path}}``."
)
WRONG_APP_REFERENCE = (
    "Document {!r} does not correspond to the same database " "({!r}) as the client."
)
# Server-side sentinel enum used when encoding SERVER_TIMESTAMP transforms.
REQUEST_TIME_ENUM = enums.DocumentTransform.FieldTransform.ServerValue.REQUEST_TIME
# Maps gRPC status codes to google.cloud exception classes surfaced to callers.
_GRPC_ERROR_MAPPING = {
    grpc.StatusCode.ALREADY_EXISTS: exceptions.Conflict,
    grpc.StatusCode.NOT_FOUND: exceptions.NotFound,
}
class GeoPoint(object):
    """Simple container for a geo point value.

    Args:
        latitude (float): Latitude of a point.
        longitude (float): Longitude of a point.
    """

    def __init__(self, latitude, longitude):
        self.latitude = latitude
        self.longitude = longitude

    def to_protobuf(self):
        """Return this point as a ``google.type.latlng_pb2.LatLng`` protobuf."""
        return latlng_pb2.LatLng(latitude=self.latitude, longitude=self.longitude)

    def __eq__(self, other):
        """Compare two geo points for equality.

        Returns :data:`NotImplemented` when ``other`` is not a geo point.
        """
        if not isinstance(other, GeoPoint):
            return NotImplemented
        same_lat = self.latitude == other.latitude
        same_lng = self.longitude == other.longitude
        return same_lat and same_lng

    def __ne__(self, other):
        """Compare two geo points for inequality.

        Returns :data:`NotImplemented` when ``other`` is not a geo point.
        """
        is_equal = self.__eq__(other)
        # Propagate NotImplemented unchanged so Python can try the reflected op.
        return is_equal if is_equal is NotImplemented else not is_equal
def verify_path(path, is_collection):
    """Verify that a ``path`` has the correct form.

    Checks that ``path`` is non-empty, that its element count matches the
    kind of reference, and that every element is a string.

    Args:
        path (Tuple[str, ...]): The components in a collection or
            document path.
        is_collection (bool): Indicates if the ``path`` represents
            a document or a collection.

    Raises:
        ValueError: If the ``path`` is empty, if ``is_collection=True``
            and there are an even number of elements, if
            ``is_collection=False`` and there are an odd number of
            elements, or if any element is not a string.
    """
    if not path:
        raise ValueError("Document or collection path cannot be empty")
    # Collections have odd-length paths; documents have even-length paths.
    remainder = len(path) % 2
    if is_collection and remainder == 0:
        raise ValueError("A collection must have an odd number of path elements")
    if not is_collection and remainder == 1:
        raise ValueError("A document must have an even number of path elements")
    for element in path:
        if not isinstance(element, six.string_types):
            raise ValueError(BAD_PATH_TEMPLATE.format(element, type(element)))
def encode_value(value):
    """Converts a native Python value into a Firestore protobuf ``Value``.

    The ``isinstance`` checks below are ORDER-SENSITIVE; see the inline
    comments before reordering them.

    Args:
        value (Union[NoneType, bool, int, float, datetime.datetime, \
            str, bytes, dict, ~google.cloud.Firestore.GeoPoint]): A native
            Python value to convert to a protobuf field.

    Returns:
        ~google.cloud.firestore_v1beta1.types.Value: A
        value encoded as a Firestore protobuf.

    Raises:
        TypeError: If the ``value`` is not one of the accepted types.
    """
    if value is None:
        return document_pb2.Value(null_value=struct_pb2.NULL_VALUE)
    # Must come before six.integer_types since ``bool`` is an integer subtype.
    if isinstance(value, bool):
        return document_pb2.Value(boolean_value=value)
    if isinstance(value, six.integer_types):
        return document_pb2.Value(integer_value=value)
    if isinstance(value, float):
        return document_pb2.Value(double_value=value)
    # Checked before plain ``datetime.datetime`` (it appears to subclass it)
    # so the nanosecond-precision timestamp path is taken when available.
    if isinstance(value, DatetimeWithNanoseconds):
        return document_pb2.Value(timestamp_value=value.timestamp_pb())
    if isinstance(value, datetime.datetime):
        return document_pb2.Value(timestamp_value=_datetime_to_pb_timestamp(value))
    if isinstance(value, six.text_type):
        return document_pb2.Value(string_value=value)
    if isinstance(value, six.binary_type):
        return document_pb2.Value(bytes_value=value)
    # NOTE: We avoid doing an isinstance() check for a Document
    # here to avoid import cycles.
    document_path = getattr(value, "_document_path", None)
    if document_path is not None:
        return document_pb2.Value(reference_value=document_path)
    if isinstance(value, GeoPoint):
        return document_pb2.Value(geo_point_value=value.to_protobuf())
    if isinstance(value, list):
        # Recursively encode each element into an ArrayValue.
        value_list = [encode_value(element) for element in value]
        value_pb = document_pb2.ArrayValue(values=value_list)
        return document_pb2.Value(array_value=value_pb)
    if isinstance(value, dict):
        # Recursively encode the mapping into a MapValue.
        value_dict = encode_dict(value)
        value_pb = document_pb2.MapValue(fields=value_dict)
        return document_pb2.Value(map_value=value_pb)
    raise TypeError(
        "Cannot convert to a Firestore Value", value, "Invalid type", type(value)
    )
def encode_dict(values_dict):
    """Encode a dictionary into protobuf ``Value``-s.

    Args:
        values_dict (dict): The dictionary to encode as protobuf fields.

    Returns:
        Dict[str, ~google.cloud.firestore_v1beta1.types.Value]: A
        dictionary of string keys and ``Value`` protobufs as dictionary
        values.
    """
    encoded = {}
    for name, native_value in six.iteritems(values_dict):
        encoded[name] = encode_value(native_value)
    return encoded
def reference_value_to_document(reference_value, client):
    """Convert a reference value string to a document.

    Args:
        reference_value (str): A document reference value.
        client (~.firestore_v1beta1.client.Client): A client that has
            a document factory.

    Returns:
        ~.firestore_v1beta1.document.DocumentReference: The document
        corresponding to ``reference_value``.

    Raises:
        ValueError: If the ``reference_value`` is not of the expected
            format ``projects/{project}/databases/{database}/documents/...``,
            or if it does not come from the same project / database
            combination as the ``client``.
    """
    # A well-formed reference splits into exactly six parts:
    # projects / {project} / databases / {database} / documents / {doc path}
    parts = reference_value.split(DOCUMENT_PATH_DELIMITER, 5)
    if len(parts) != 6:
        raise ValueError(BAD_REFERENCE_ERROR.format(reference_value))
    # The final part is the document path itself, e.g. ``a/b/c/d``.
    document = client.document(parts[-1])
    # Round-trip check: the client must reconstruct the identical path,
    # otherwise the reference belongs to a different project/database.
    if document._document_path != reference_value:
        raise ValueError(
            WRONG_APP_REFERENCE.format(reference_value, client._database_string)
        )
    return document
def decode_value(value, client):
    """Converts a Firestore protobuf ``Value`` to a native Python value.

    Args:
        value (google.cloud.firestore_v1beta1.types.Value): A
            Firestore protobuf to be decoded / parsed / converted.
        client (~.firestore_v1beta1.client.Client): A client that has
            a document factory.

    Returns:
        Union[NoneType, bool, int, float, datetime.datetime, \
            str, bytes, dict, ~google.cloud.Firestore.GeoPoint]: A native
            Python value converted from the ``value``.

    Raises:
        ValueError: If the ``value_type`` is unknown.
    """
    kind = value.WhichOneof("value_type")
    if kind == "null_value":
        return None
    # Scalar kinds whose protobuf attribute can be returned as-is; the
    # oneof name doubles as the attribute name.
    if kind in ("boolean_value", "integer_value", "double_value",
                "string_value", "bytes_value"):
        return getattr(value, kind)
    if kind == "timestamp_value":
        return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value)
    if kind == "reference_value":
        return reference_value_to_document(value.reference_value, client)
    if kind == "geo_point_value":
        geo_pb = value.geo_point_value
        return GeoPoint(geo_pb.latitude, geo_pb.longitude)
    if kind == "array_value":
        return [decode_value(item, client) for item in value.array_value.values]
    if kind == "map_value":
        return decode_dict(value.map_value.fields, client)
    raise ValueError("Unknown ``value_type``", kind)
def decode_dict(value_fields, client):
    """Converts a protobuf map of Firestore ``Value``-s.

    Args:
        value_fields (google.protobuf.pyext._message.MessageMapContainer): A
            protobuf map of Firestore ``Value``-s.
        client (~.firestore_v1beta1.client.Client): A client that has
            a document factory.

    Returns:
        Dict[str, Union[NoneType, bool, int, float, datetime.datetime, \
            str, bytes, dict, ~google.cloud.Firestore.GeoPoint]]: A dictionary
            of native Python values converted from the ``value_fields``.
    """
    decoded = {}
    for name, value_pb in six.iteritems(value_fields):
        decoded[name] = decode_value(value_pb, client)
    return decoded
def get_doc_id(document_pb, expected_prefix):
    """Parse a document ID from a document protobuf.

    Args:
        document_pb (google.cloud.proto.firestore.v1beta1.\
            document_pb2.Document): A protobuf for a document that
            was created in a ``CreateDocument`` RPC.
        expected_prefix (str): The expected collection prefix for the
            fully-qualified document name.

    Returns:
        str: The document ID from the protobuf.

    Raises:
        ValueError: If the name does not begin with the prefix.
    """
    # The ID is everything after the last path delimiter.
    prefix, document_id = document_pb.name.rsplit(DOCUMENT_PATH_DELIMITER, 1)
    if prefix == expected_prefix:
        return document_id
    raise ValueError(
        "Unexpected document name",
        document_pb.name,
        "Expected to begin with",
        expected_prefix,
    )
# Sentinel yielded by ``extract_fields`` for an empty dict, so empty maps
# survive the walk and can be restored as ``{}`` by ``set_field_value``.
_EmptyDict = transforms.Sentinel("Marker for an empty dict value")
def extract_fields(document_data, prefix_path, expand_dots=False):
    """Depth-first walk of a document dict, yielding ``(field_path, value)``.

    An empty mapping yields the ``_EmptyDict`` sentinel for its own path so
    that empty maps are preserved.

    Args:
        document_data (dict): Possibly-nested document data.
        prefix_path (FieldPath): Path of the subtree being walked.
        expand_dots (bool): If true, parse dotted keys as multi-part field
            paths. Note: not propagated to recursive calls, so expansion
            applies only to top-level keys.
    """
    if not document_data:
        yield prefix_path, _EmptyDict
        return
    for key, value in sorted(six.iteritems(document_data)):
        if expand_dots:
            sub_path = FieldPath.from_string(key)
        else:
            sub_path = FieldPath(key)
        field_path = FieldPath(*(prefix_path.parts + sub_path.parts))
        if isinstance(value, dict):
            # Recurse into nested mappings, re-yielding their leaves.
            for nested_path, nested_value in extract_fields(value, field_path):
                yield nested_path, nested_value
        else:
            yield field_path, value
def set_field_value(document_data, field_path, value):
    """Store ``value`` into ``document_data`` at ``field_path``.

    Intermediate dicts are created as needed; the ``_EmptyDict`` sentinel
    is materialized back into an actual empty dict.
    """
    parts = field_path.parts
    node = document_data
    for name in parts[:-1]:
        node = node.setdefault(name, {})
    if value is _EmptyDict:
        value = {}
    node[parts[-1]] = value
def get_field_value(document_data, field_path):
    """Return the value stored in ``document_data`` at ``field_path``.

    Args:
        document_data (dict): Possibly-nested document data.
        field_path: Object with a non-empty ``parts`` tuple of keys.

    Raises:
        ValueError: If ``field_path`` has no parts.
        KeyError: If any component of the path is missing.
    """
    parts = field_path.parts
    if not parts:
        raise ValueError("Empty path")
    node = document_data
    for name in parts[:-1]:
        node = node[name]
    return node[parts[-1]]
class DocumentExtractor(object):
    """Break document data up into actual data and transforms.

    Handle special values such as ``DELETE_FIELD``, ``SERVER_TIMESTAMP``,
    ``ArrayRemove`` and ``ArrayUnion``; each extracted field is sorted into
    the matching bucket during ``__init__``.

    Args:
        document_data (dict):
            Property names and values to use for sending a change to
            a document.
    """

    def __init__(self, document_data):
        self.document_data = document_data
        # Paths holding plain (non-transform) values.
        self.field_paths = []
        # Paths marked with DELETE_FIELD.
        self.deleted_fields = []
        # Paths marked with SERVER_TIMESTAMP.
        self.server_timestamps = []
        # Path -> element values for ArrayRemove transforms.
        self.array_removes = {}
        # Path -> element values for ArrayUnion transforms.
        self.array_unions = {}
        # Nested dict rebuilt from the plain values only.
        self.set_fields = {}
        # True when ``document_data`` was an empty dict.
        self.empty_document = False
        prefix_path = FieldPath()
        iterator = self._get_document_iterator(prefix_path)
        for field_path, value in iterator:
            if field_path == prefix_path and value is _EmptyDict:
                self.empty_document = True
            elif value is transforms.DELETE_FIELD:
                self.deleted_fields.append(field_path)
            elif value is transforms.SERVER_TIMESTAMP:
                self.server_timestamps.append(field_path)
            elif isinstance(value, transforms.ArrayRemove):
                self.array_removes[field_path] = value.values
            elif isinstance(value, transforms.ArrayUnion):
                self.array_unions[field_path] = value.values
            else:
                self.field_paths.append(field_path)
                set_field_value(self.set_fields, field_path, value)

    def _get_document_iterator(self, prefix_path):
        # Hook overridden by subclasses (e.g. to expand dotted keys for update).
        return extract_fields(self.document_data, prefix_path)

    @property
    def has_transforms(self):
        # True when any timestamp or array transform was extracted.
        return bool(self.server_timestamps or self.array_removes or self.array_unions)

    @property
    def transform_paths(self):
        # Sorted field paths of every transform, across all three buckets.
        return sorted(
            self.server_timestamps + list(self.array_removes) + list(self.array_unions)
        )

    def _get_update_mask(self, allow_empty_mask=False):
        # Base class sends no update mask; subclasses compute one.
        return None

    def get_update_pb(self, document_path, exists=None, allow_empty_mask=False):
        """Build the ``Write`` protobuf carrying the plain field values.

        Args:
            document_path (str): Fully-qualified document path.
            exists (Optional[bool]): When set, attach an existence
                precondition to the write.
            allow_empty_mask (bool): Forwarded to ``_get_update_mask``.
        """
        if exists is not None:
            current_document = common_pb2.Precondition(exists=exists)
        else:
            current_document = None
        update_pb = write_pb2.Write(
            update=document_pb2.Document(
                name=document_path, fields=encode_dict(self.set_fields)
            ),
            update_mask=self._get_update_mask(allow_empty_mask),
            current_document=current_document,
        )
        return update_pb

    def get_transform_pb(self, document_path, exists=None):
        """Build the ``Write`` protobuf carrying the field transforms.

        Args:
            document_path (str): Fully-qualified document path.
            exists (Optional[bool]): When set, attach an existence
                precondition to the write.
        """
        def make_array_value(values):
            # Encode a list of native values as a protobuf ArrayValue.
            value_list = [encode_value(element) for element in values]
            return document_pb2.ArrayValue(values=value_list)
        # Pair each transform with its path so the combined list can be
        # sorted by path before being sent.
        path_field_transforms = (
            [
                (
                    path,
                    write_pb2.DocumentTransform.FieldTransform(
                        field_path=path.to_api_repr(),
                        set_to_server_value=REQUEST_TIME_ENUM,
                    ),
                )
                for path in self.server_timestamps
            ]
            + [
                (
                    path,
                    write_pb2.DocumentTransform.FieldTransform(
                        field_path=path.to_api_repr(),
                        remove_all_from_array=make_array_value(values),
                    ),
                )
                for path, values in self.array_removes.items()
            ]
            + [
                (
                    path,
                    write_pb2.DocumentTransform.FieldTransform(
                        field_path=path.to_api_repr(),
                        append_missing_elements=make_array_value(values),
                    ),
                )
                for path, values in self.array_unions.items()
            ]
        )
        field_transforms = [
            transform for path, transform in sorted(path_field_transforms)
        ]
        transform_pb = write_pb2.Write(
            transform=write_pb2.DocumentTransform(
                document=document_path, field_transforms=field_transforms
            )
        )
        if exists is not None:
            transform_pb.current_document.CopyFrom(
                common_pb2.Precondition(exists=exists)
            )
        return transform_pb
def pbs_for_create(document_path, document_data):
    """Make ``Write`` protobufs for ``create()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        document_data (dict): Property names and values to use for
            creating a document.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One or two
        ``Write`` protobuf instances for ``create()``.

    Raises:
        ValueError: If ``document_data`` contains ``DELETE_FIELD``.
    """
    extractor = DocumentExtractor(document_data)
    if extractor.deleted_fields:
        raise ValueError("Cannot apply DELETE_FIELD in a create request.")
    writes = []
    # Conformance tests require skipping the 'update_pb' if the document
    # contains only transforms.
    if extractor.empty_document or extractor.set_fields:
        writes.append(extractor.get_update_pb(document_path, exists=False))
    if extractor.has_transforms:
        # The "must not exist" precondition rides on the transform write
        # only when there is no update write to carry it.
        precondition_exists = None if writes else False
        writes.append(extractor.get_transform_pb(document_path, precondition_exists))
    return writes
def pbs_for_set_no_merge(document_path, document_data):
    """Make ``Write`` protobufs for non-merge ``set()`` calls.

    Args:
        document_path (str): A fully-qualified document path.
        document_data (dict): Property names and values to use for
            replacing a document.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One
        or two ``Write`` protobuf instances for ``set()``.

    Raises:
        ValueError: If ``document_data`` contains ``DELETE_FIELD``.
    """
    extractor = DocumentExtractor(document_data)
    if extractor.deleted_fields:
        raise ValueError(
            "Cannot apply DELETE_FIELD in a set request without "
            "specifying 'merge=True' or 'merge=[field_paths]'."
        )
    # Conformance tests require sending the 'update_pb' even if the
    # document contains only transforms.
    writes = [extractor.get_update_pb(document_path)]
    if extractor.has_transforms:
        writes.append(extractor.get_transform_pb(document_path))
    return writes
class DocumentExtractorForMerge(DocumentExtractor):
    """Break document data up into actual data and transforms.

    Specialization of :class:`DocumentExtractor` for ``set(..., merge=...)``:
    after construction, :meth:`apply_merge` narrows the extracted data and
    transforms down to the requested merge paths.
    """

    def __init__(self, document_data):
        super(DocumentExtractorForMerge, self).__init__(document_data)
        # Data paths included in the merge (populated by ``apply_merge``).
        self.data_merge = []
        # Transform paths included in the merge.
        self.transform_merge = []
        # All merged paths (data + transforms).
        self.merge = []

    @property
    def has_updates(self):
        # True when an update write must be emitted for this merge.
        # for whatever reason, the conformance tests want to see the parent
        # of nested transform paths in the update mask
        # (see set-st-merge-nonleaf-alone.textproto)
        update_paths = set(self.data_merge)
        for transform_path in self.transform_paths:
            if len(transform_path.parts) > 1:
                parent_fp = FieldPath(*transform_path.parts[:-1])
                update_paths.add(parent_fp)
        return bool(update_paths)

    def _apply_merge_all(self):
        # ``merge=True``: merge every data field, delete, and transform.
        self.data_merge = sorted(self.field_paths + self.deleted_fields)
        # TODO: other transforms
        self.transform_merge = self.transform_paths
        self.merge = sorted(self.data_merge + self.transform_paths)

    def _construct_merge_paths(self, merge):
        # Normalize each merge entry to a ``FieldPath`` (strings are parsed).
        for merge_field in merge:
            if isinstance(merge_field, FieldPath):
                yield merge_field
            else:
                yield FieldPath(*parse_field_path(merge_field))

    def _normalize_merge_paths(self, merge):
        """Sort and validate caller-supplied merge paths."""
        merge_paths = sorted(self._construct_merge_paths(merge))
        # Raise if any merge path is a parent of another. Leverage sorting
        # to avoid quadratic behavior.
        for index in range(len(merge_paths) - 1):
            lhs, rhs = merge_paths[index], merge_paths[index + 1]
            if lhs.eq_or_parent(rhs):
                raise ValueError("Merge paths overlap: {}, {}".format(lhs, rhs))
        for merge_path in merge_paths:
            if merge_path in self.deleted_fields:
                continue
            # Every non-deleted merge path must exist in the document data.
            try:
                get_field_value(self.document_data, merge_path)
            except KeyError:
                raise ValueError("Invalid merge path: {}".format(merge_path))
        return merge_paths

    def _apply_merge_paths(self, merge):
        """Narrow extracted state down to the explicit ``merge`` paths."""
        if self.empty_document:
            raise ValueError("Cannot merge specific fields with empty document.")
        merge_paths = self._normalize_merge_paths(merge)
        del self.data_merge[:]
        del self.transform_merge[:]
        self.merge = merge_paths
        # Classify each merge path as a transform and/or data parent.
        for merge_path in merge_paths:
            if merge_path in self.transform_paths:
                self.transform_merge.append(merge_path)
            for field_path in self.field_paths:
                if merge_path.eq_or_parent(field_path):
                    self.data_merge.append(field_path)
        # Clear out data for fields not merged.
        merged_set_fields = {}
        for field_path in self.data_merge:
            value = get_field_value(self.document_data, field_path)
            set_field_value(merged_set_fields, field_path, value)
        self.set_fields = merged_set_fields
        # A DELETE_FIELD outside the merge list is an error.
        unmerged_deleted_fields = [
            field_path
            for field_path in self.deleted_fields
            if field_path not in self.merge
        ]
        if unmerged_deleted_fields:
            raise ValueError(
                "Cannot delete unmerged fields: {}".format(unmerged_deleted_fields)
            )
        self.data_merge = sorted(self.data_merge + self.deleted_fields)
        # Keep only transforms which are within merge.
        merged_transform_paths = set()
        for merge_path in self.merge:
            tranform_merge_paths = [
                transform_path
                for transform_path in self.transform_paths
                if merge_path.eq_or_parent(transform_path)
            ]
            merged_transform_paths.update(tranform_merge_paths)
        self.server_timestamps = [
            path for path in self.server_timestamps if path in merged_transform_paths
        ]
        self.array_removes = {
            path: values
            for path, values in self.array_removes.items()
            if path in merged_transform_paths
        }
        self.array_unions = {
            path: values
            for path, values in self.array_unions.items()
            if path in merged_transform_paths
        }

    def apply_merge(self, merge):
        """Narrow extracted fields to ``merge``; ``True`` means all fields."""
        if merge is True:  # merge all fields
            self._apply_merge_all()
        else:
            self._apply_merge_paths(merge)

    def _get_update_mask(self, allow_empty_mask=False):
        # Mask uses dotted / quoted paths.
        mask_paths = [
            field_path.to_api_repr()
            for field_path in self.merge
            if field_path not in self.transform_merge
        ]
        if mask_paths or allow_empty_mask:
            return common_pb2.DocumentMask(field_paths=mask_paths)
def pbs_for_set_with_merge(document_path, document_data, merge):
    """Make ``Write`` protobufs for merging ``set()`` calls.

    Args:
        document_path (str): A fully-qualified document path.
        document_data (dict): Property names and values to use for
            replacing a document.
        merge (Optional[bool] or Optional[List<apispec>]):
            If True, merge all fields; else, merge only the named fields.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One
        or two ``Write`` protobuf instances for ``set()``.
    """
    extractor = DocumentExtractorForMerge(document_data)
    extractor.apply_merge(merge)
    merge_empty = not document_data
    writes = []
    if extractor.has_updates or merge_empty:
        # An empty document still needs an (empty-mask) update write.
        update_pb = extractor.get_update_pb(
            document_path, allow_empty_mask=merge_empty
        )
        writes.append(update_pb)
    if extractor.transform_paths:
        writes.append(extractor.get_transform_pb(document_path))
    return writes
class DocumentExtractorForUpdate(DocumentExtractor):
    """Break document data up into actual data and transforms.

    Specialization of :class:`DocumentExtractor` for ``update()``:
    top-level keys are parsed as dotted field paths, and conflicting or
    nested-delete paths are rejected.

    Args:
        document_data (dict): Field paths (dotted strings or
            ``FieldPath``-compatible keys) mapped to update values.

    Raises:
        ValueError: If one top-level path is an ancestor of another, or
            if a ``DELETE_FIELD`` appears below the top level.
    """

    def __init__(self, document_data):
        super(DocumentExtractorForUpdate, self).__init__(document_data)
        self.top_level_paths = sorted(
            [FieldPath.from_string(key) for key in document_data]
        )
        tops = set(self.top_level_paths)
        # Reject updates where one key is an ancestor of another
        # (e.g. "a" and "a.b" in the same call).
        for top_level_path in self.top_level_paths:
            for ancestor in top_level_path.lineage():
                if ancestor in tops:
                    raise ValueError(
                        "Conflicting field path: {}, {}".format(
                            top_level_path, ancestor
                        )
                    )
        # DELETE_FIELD is only allowed as a whole top-level key, never
        # nested inside a mapped value.
        for field_path in self.deleted_fields:
            if field_path not in tops:
                raise ValueError(
                    "Cannot update with nested delete: {}".format(field_path)
                )

    def _get_document_iterator(self, prefix_path):
        # Unlike the base class, expand dotted keys into nested paths.
        return extract_fields(self.document_data, prefix_path, expand_dots=True)

    def _get_update_mask(self, allow_empty_mask=False):
        # The mask names every top-level path that is not purely a transform.
        mask_paths = []
        for field_path in self.top_level_paths:
            if field_path not in self.transform_paths:
                mask_paths.append(field_path.to_api_repr())
        return common_pb2.DocumentMask(field_paths=mask_paths)
def pbs_for_update(document_path, field_updates, option):
    """Make ``Write`` protobufs for ``update()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        field_updates (dict): Field names or paths to update and values
            to update with.
        option (optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the server
            state of the document before applying changes.

    Returns:
        List[google.cloud.firestore_v1beta1.types.Write]: One
        or two ``Write`` protobuf instances for ``update()``.

    Raises:
        ValueError: If ``field_updates`` is empty.
    """
    extractor = DocumentExtractorForUpdate(field_updates)
    if extractor.empty_document:
        raise ValueError("Cannot update with an empty document.")
    # Default precondition: the document must already exist.
    if option is None:
        option = ExistsOption(exists=True)
    writes = []
    has_data = bool(extractor.field_paths or extractor.deleted_fields)
    if has_data:
        update_pb = extractor.get_update_pb(document_path)
        option.modify_write(update_pb)
        writes.append(update_pb)
    if extractor.has_transforms:
        transform_pb = extractor.get_transform_pb(document_path)
        if not writes:
            # The write option rides on the transform write only when
            # there is no update write to carry it.
            option.modify_write(transform_pb)
        writes.append(transform_pb)
    return writes
def pb_for_delete(document_path, option):
    """Make a ``Write`` protobuf for ``delete()`` methods.

    Args:
        document_path (str): A fully-qualified document path.
        option (optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the server
            state of the document before applying changes.

    Returns:
        google.cloud.firestore_v1beta1.types.Write: A
        ``Write`` protobuf instance for the ``delete()``.
    """
    delete_pb = write_pb2.Write(delete=document_path)
    if option is not None:
        # Attach the caller's precondition (existence / update-time).
        option.modify_write(delete_pb)
    return delete_pb
class ReadAfterWriteError(Exception):
    """Raised when a read is attempted after a write.

    Raised by "read" methods that use transactions, since Firestore
    transactions require all reads to precede all writes.
    """
def get_transaction_id(transaction, read_operation=True):
    """Get the transaction ID from a ``Transaction`` object.

    Args:
        transaction (Optional[~.firestore_v1beta1.transaction.\
            Transaction]): An existing transaction that this query will
            run in.
        read_operation (Optional[bool]): Indicates if the transaction ID
            will be used in a read operation. Defaults to :data:`True`.

    Returns:
        Optional[bytes]: The ID of the transaction, or :data:`None` if the
        ``transaction`` is :data:`None`.

    Raises:
        ValueError: If the ``transaction`` is not in progress (only if
            ``transaction`` is not :data:`None`).
        ReadAfterWriteError: If the ``transaction`` has writes stored on
            it and ``read_operation`` is :data:`True`.
    """
    if transaction is None:
        return None
    # Guard clauses replace the original nested ``else`` block.
    if not transaction.in_progress:
        raise ValueError(INACTIVE_TXN)
    if read_operation and len(transaction._write_pbs) > 0:
        raise ReadAfterWriteError(READ_AFTER_WRITE_ERROR)
    return transaction.id
def metadata_with_prefix(prefix, **kw):
    """Create RPC metadata containing a prefix.

    Args:
        prefix (str): appropriate resource path.
        kw: Extra keyword arguments (accepted but currently unused).

    Returns:
        List[Tuple[str, str]]: RPC metadata with supplied prefix
    """
    metadata_key = "google-cloud-resource-prefix"
    return [(metadata_key, prefix)]
class WriteOption(object):
    """Option used to assert a condition on a write operation."""

    def modify_write(self, write_pb, no_create_msg=None):
        """Modify a ``Write`` protobuf based on the state of this option.

        Virtual method; concrete subclasses must override it.

        Args:
            write_pb (google.cloud.firestore_v1beta1.types.Write): A
                ``Write`` protobuf instance to be modified with a
                precondition determined by the state of this option.
            no_create_msg (Optional[str]): A message to use to indicate
                that a create operation is not allowed.

        Raises:
            NotImplementedError: Always, this method is virtual.
        """
        raise NotImplementedError
class LastUpdateOption(WriteOption):
"""Option used to assert a "last update" condition on a write operation.
This will typically be created by
:meth:`~google.cloud.firestore_v1beta1.client.Client.write_option`.
Args:
last_update_time (google.protobuf.timestamp_pb2.Timestamp): A
timestamp. When set, the target document must exist and have
been last updated at that time. Protobuf ``update_time`` timestamps
are typically returned from methods that perform write operations
as part of a "write result" protobuf or directly.
"""
def __init__(self, last_update_time):
self._last_update_time = last_update_time
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._last_update_time == other._last_update_time
def modify_write(self, write_pb, **unused_kwargs):
"""Modify a ``Write`` protobuf based on the state of this write option.
The ``last_update_time`` is added to ``write_pb`` as an "update time"
precondition. When set, the target document must exist and have been
last updated at that time.
Args:
write_pb (google.cloud.firestore_v1beta1.types.Write): A
``Write`` protobuf instance to be modified with a precondition
determined by the state of this option.
unused_kwargs (Dict[str, Any]): Keyword arguments accepted by
other subclasses that are unused here.
"""
current_doc = types.Precondition(update_time=self._last_update_time)
write_pb.current_document.CopyFrom(current_doc)
class ExistsOption(WriteOption):
"""Option used to assert existence on a write operation.
This will typically be created by
:meth:`~google.cloud.firestore_v1beta1.client.Client.write_option`.
Args:
exists (bool): Indicates if the document being modified
should already exist.
"""
def __init__(self, exists):
self._exists = exists
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._exists == other._exists
def modify_write(self, write_pb, **unused_kwargs):
"""Modify a ``Write`` protobuf based on the state of this write option.
If:
* ``exists=True``, adds a precondition that requires existence
* ``exists=False``, adds a precondition that requires non-existence
Args:
write_pb (google.cloud.firestore_v1beta1.types.Write): A
``Write`` protobuf instance to be modified with a precondition
determined by the state of this option.
unused_kwargs (Dict[str, Any]): Keyword arguments accepted by
other subclasses that are unused here.
"""
current_doc = types.Precondition(exists=self._exists)
write_pb.current_document.CopyFrom(current_doc)

View file

@@ -0,0 +1,162 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for batch requests to the Google Cloud Firestore API."""
from google.cloud.firestore_v1beta1 import _helpers
class WriteBatch(object):
    """Accumulate write operations to be sent in a single batch.

    Offers the same set of write operations that
    :class:`~google.cloud.firestore_v1beta1.document.DocumentReference`
    does, e.g.
    :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.create`.

    Args:
        client (~.firestore_v1beta1.client.Client): The client that
            created this batch.
    """

    def __init__(self, client):
        self._client = client
        self._write_pbs = []
        # Populated by commit(); None until then.
        self.write_results = None
        self.commit_time = None

    def _add_write_pbs(self, write_pbs):
        """Append ``Write`` protobufs to this batch.

        This method is intended to be over-ridden by subclasses.

        Args:
            write_pbs (List[google.cloud.proto.firestore.v1beta1.\
                write_pb2.Write]): A list of write protobufs to be added.
        """
        self._write_pbs += write_pbs

    def create(self, reference, document_data):
        """Queue a "change" creating a document.

        If the document given by ``reference`` already exists, this batch
        will fail when :meth:`commit`-ed.

        Args:
            reference (~.firestore_v1beta1.document.DocumentReference): A
                document reference to be created in this batch.
            document_data (dict): Property names and values to use for
                creating a document.
        """
        self._add_write_pbs(
            _helpers.pbs_for_create(reference._document_path, document_data)
        )

    def set(self, reference, document_data, merge=False):
        """Queue a "change" replacing a document.

        See
        :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.set`
        for more information on how ``option`` determines how the change is
        applied.

        Args:
            reference (~.firestore_v1beta1.document.DocumentReference):
                A document reference that will have values set in this batch.
            document_data (dict):
                Property names and values to use for replacing a document.
            merge (Optional[bool] or Optional[List<apispec>]):
                If True, apply merging instead of overwriting the state
                of the document.
        """
        if merge is False:
            pbs = _helpers.pbs_for_set_no_merge(
                reference._document_path, document_data
            )
        else:
            pbs = _helpers.pbs_for_set_with_merge(
                reference._document_path, document_data, merge
            )
        self._add_write_pbs(pbs)

    def update(self, reference, field_updates, option=None):
        """Queue a "change" updating a document.

        See
        :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.update`
        for more information on ``field_updates`` and ``option``.

        Args:
            reference (~.firestore_v1beta1.document.DocumentReference): A
                document reference that will be updated in this batch.
            field_updates (dict): Field names or paths to update and values
                to update with.
            option (Optional[~.firestore_v1beta1.client.WriteOption]): A
                write option to make assertions / preconditions on the server
                state of the document before applying changes.

        Raises:
            ValueError: If an ``ExistsOption`` is passed; ``update`` always
                implies an existence precondition of its own.
        """
        # Checked by class name (not isinstance) so subclasses defined
        # elsewhere with a different name are not rejected.
        if option.__class__.__name__ == "ExistsOption":
            raise ValueError("you must not pass an explicit write option to update.")
        self._add_write_pbs(
            _helpers.pbs_for_update(reference._document_path, field_updates, option)
        )

    def delete(self, reference, option=None):
        """Queue a "change" deleting a document.

        See
        :meth:`~google.cloud.firestore_v1beta1.document.DocumentReference.delete`
        for more information on how ``option`` determines how the change is
        applied.

        Args:
            reference (~.firestore_v1beta1.document.DocumentReference): A
                document reference that will be deleted in this batch.
            option (Optional[~.firestore_v1beta1.client.WriteOption]): A
                write option to make assertions / preconditions on the server
                state of the document before applying changes.
        """
        self._add_write_pbs(
            [_helpers.pb_for_delete(reference._document_path, option)]
        )

    def commit(self):
        """Send the writes accumulated in this batch.

        Returns:
            List[google.cloud.proto.firestore.v1beta1.\
                write_pb2.WriteResult, ...]: The write results corresponding
            to the changes committed, returned in the same order as the
            changes were applied to this batch. A write result contains an
            ``update_time`` field.
        """
        response = self._client._firestore_api.commit(
            self._client._database_string,
            self._write_pbs,
            transaction=None,
            metadata=self._client._rpc_metadata,
        )
        # Reset the queue so the batch instance can be reused.
        self._write_pbs = []
        self.write_results = results = list(response.write_results)
        self.commit_time = response.commit_time
        return results

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Commit only on a clean exit; exceptions propagate uncommitted.
        if exc_type is None:
            self.commit()

View file

@ -0,0 +1,542 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Firestore API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`~google.cloud.firestore_v1beta1.client.Client` owns a
:class:`~google.cloud.firestore_v1beta1.collection.CollectionReference`
* a :class:`~google.cloud.firestore_v1beta1.client.Client` owns a
:class:`~google.cloud.firestore_v1beta1.document.DocumentReference`
"""
import warnings
from google.cloud.client import ClientWithProject
from google.cloud.firestore_v1beta1 import _helpers
from google.cloud.firestore_v1beta1 import types
from google.cloud.firestore_v1beta1.batch import WriteBatch
from google.cloud.firestore_v1beta1.collection import CollectionReference
from google.cloud.firestore_v1beta1.document import DocumentReference
from google.cloud.firestore_v1beta1.document import DocumentSnapshot
from google.cloud.firestore_v1beta1.field_path import render_field_path
from google.cloud.firestore_v1beta1.gapic import firestore_client
from google.cloud.firestore_v1beta1.gapic.transports import firestore_grpc_transport
from google.cloud.firestore_v1beta1.transaction import Transaction
DEFAULT_DATABASE = "(default)"
"""str: The default database used in a :class:`~google.cloud.firestore.client.Client`."""
_BAD_OPTION_ERR = (
"Exactly one of ``last_update_time`` or ``exists`` " "must be provided."
)
_BAD_DOC_TEMPLATE = (
"Document {!r} appeared in response but was not present among references"
)
_ACTIVE_TXN = "There is already an active transaction."
_INACTIVE_TXN = "There is no active transaction."
_V1BETA1_DEPRECATED_MESSAGE = (
"The 'v1beta1' API endpoint is deprecated. "
"The client/library which supports it will be removed in a future release."
)
class Client(ClientWithProject):
    """Client for interacting with Google Cloud Firestore API.

    .. note::

        Since the Cloud Firestore API requires the gRPC transport, no
        ``_http`` argument is accepted by this class.

    Args:
        project (Optional[str]): The project which the client acts on behalf
            of. If not passed, falls back to the default inferred
            from the environment.
        credentials (Optional[~google.auth.credentials.Credentials]): The
            OAuth2 Credentials to use for this client. If not passed, falls
            back to the default inferred from the environment.
        database (Optional[str]): The database name that the client targets.
            For now, :attr:`DEFAULT_DATABASE` (the default value) is the
            only valid database.
    """

    SCOPE = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/datastore",
    )
    """The scopes required for authenticating with the Firestore service."""

    # Lazily-created values, cached on first access by the corresponding
    # @property below; class-level None means "not created yet".
    _firestore_api_internal = None
    _database_string_internal = None
    _rpc_metadata_internal = None

    def __init__(self, project=None, credentials=None, database=DEFAULT_DATABASE):
        # v1beta1 is deprecated; stacklevel=2 points the warning at the
        # caller's constructor invocation rather than this line.
        warnings.warn(_V1BETA1_DEPRECATED_MESSAGE, DeprecationWarning, stacklevel=2)
        # NOTE: This API has no use for the _http argument, but sending it
        #       will have no impact since the _http() @property only lazily
        #       creates a working HTTP object.
        super(Client, self).__init__(
            project=project, credentials=credentials, _http=None
        )
        self._database = database

    @property
    def _firestore_api(self):
        """Lazy-loading getter GAPIC Firestore API.

        Returns:
            ~.gapic.firestore.v1beta1.firestore_client.FirestoreClient: The
            GAPIC client with the credentials of the current client.
        """
        if self._firestore_api_internal is None:
            # Use a custom channel.
            # We need this in order to set appropriate keepalive options.
            channel = firestore_grpc_transport.FirestoreGrpcTransport.create_channel(
                self._target,
                credentials=self._credentials,
                options={"grpc.keepalive_time_ms": 30000}.items(),
            )
            self._transport = firestore_grpc_transport.FirestoreGrpcTransport(
                address=self._target, channel=channel
            )
            self._firestore_api_internal = firestore_client.FirestoreClient(
                transport=self._transport
            )
        return self._firestore_api_internal

    @property
    def _target(self):
        """Return the target (where the API is).

        Returns:
            str: The location of the API.
        """
        return firestore_client.FirestoreClient.SERVICE_ADDRESS

    @property
    def _database_string(self):
        """The database string corresponding to this client's project.

        This value is lazy-loaded and cached.

        Will be of the form

            ``projects/{project_id}/databases/{database_id}``

        but ``database_id == '(default)'`` for the time being.

        Returns:
            str: The fully-qualified database string for the current
            project. (The default database is also in this string.)
        """
        if self._database_string_internal is None:
            # NOTE: database_root_path() is a classmethod, so we don't use
            #       self._firestore_api (it isn't necessary).
            db_str = firestore_client.FirestoreClient.database_root_path(
                self.project, self._database
            )
            self._database_string_internal = db_str
        return self._database_string_internal

    @property
    def _rpc_metadata(self):
        """The RPC metadata for this client's associated database.

        This value is lazy-loaded and cached.

        Returns:
            Sequence[Tuple(str, str)]: RPC metadata with resource prefix
            for the database associated with this client.
        """
        if self._rpc_metadata_internal is None:
            self._rpc_metadata_internal = _helpers.metadata_with_prefix(
                self._database_string
            )
        return self._rpc_metadata_internal

    def collection(self, *collection_path):
        """Get a reference to a collection.

        For a top-level collection:

        .. code-block:: python

            >>> client.collection('top')

        For a sub-collection:

        .. code-block:: python

            >>> client.collection('mydocs/doc/subcol')
            >>> # is the same as
            >>> client.collection('mydocs', 'doc', 'subcol')

        Sub-collections can be nested deeper in a similar fashion.

        Args:
            collection_path (Tuple[str, ...]): Can either be

                * A single ``/``-delimited path to a collection
                * A tuple of collection path segments

        Returns:
            ~.firestore_v1beta1.collection.CollectionReference: A reference
            to a collection in the Firestore database.
        """
        # A single argument is treated as a delimited path; several
        # arguments are treated as pre-split segments.
        if len(collection_path) == 1:
            path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
        else:
            path = collection_path
        return CollectionReference(*path, client=self)

    def document(self, *document_path):
        """Get a reference to a document in a collection.

        For a top-level document:

        .. code-block:: python

            >>> client.document('collek/shun')
            >>> # is the same as
            >>> client.document('collek', 'shun')

        For a document in a sub-collection:

        .. code-block:: python

            >>> client.document('mydocs/doc/subcol/child')
            >>> # is the same as
            >>> client.document('mydocs', 'doc', 'subcol', 'child')

        Documents in sub-collections can be nested deeper in a similar fashion.

        Args:
            document_path (Tuple[str, ...]): Can either be

                * A single ``/``-delimited path to a document
                * A tuple of document path segments

        Returns:
            ~.firestore_v1beta1.document.DocumentReference: A reference
            to a document in a collection.
        """
        # Same path-splitting convention as collection() above.
        if len(document_path) == 1:
            path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
        else:
            path = document_path
        return DocumentReference(*path, client=self)

    @staticmethod
    def field_path(*field_names):
        """Create a **field path** from a list of nested field names.

        A **field path** is a ``.``-delimited concatenation of the field
        names. It is used to represent a nested field. For example,
        in the data

        .. code-block:: python

            data = {
                'aa': {
                    'bb': {
                        'cc': 10,
                    },
                },
            }

        the field path ``'aa.bb.cc'`` represents the data stored in
        ``data['aa']['bb']['cc']``.

        Args:
            field_names (Tuple[str, ...]): The list of field names.

        Returns:
            str: The ``.``-delimited field path.
        """
        return render_field_path(field_names)

    @staticmethod
    def write_option(**kwargs):
        """Create a write option for write operations.

        Write operations include :meth:`~google.cloud.DocumentReference.set`,
        :meth:`~google.cloud.DocumentReference.update` and
        :meth:`~google.cloud.DocumentReference.delete`.

        One of the following keyword arguments must be provided:

        * ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\
              Timestamp`): A timestamp. When set, the target document must
              exist and have been last updated at that time. Protobuf
              ``update_time`` timestamps are typically returned from methods
              that perform write operations as part of a "write result"
              protobuf or directly.
        * ``exists`` (:class:`bool`): Indicates if the document being modified
              should already exist.

        Providing no argument would make the option have no effect (so
        it is not allowed). Providing multiple would be an apparent
        contradiction, since ``last_update_time`` assumes that the
        document **was** updated (it can't have been updated if it
        doesn't exist) and ``exists`` indicate that it is unknown if the
        document exists or not.

        Args:
            kwargs (Dict[str, Any]): The keyword arguments described above.

        Raises:
            TypeError: If anything other than exactly one argument is
                provided by the caller.
        """
        if len(kwargs) != 1:
            raise TypeError(_BAD_OPTION_ERR)

        # Exactly one item; popitem() yields it.
        name, value = kwargs.popitem()
        if name == "last_update_time":
            return _helpers.LastUpdateOption(value)
        elif name == "exists":
            return _helpers.ExistsOption(value)
        else:
            extra = "{!r} was provided".format(name)
            raise TypeError(_BAD_OPTION_ERR, extra)

    def get_all(self, references, field_paths=None, transaction=None):
        """Retrieve a batch of documents.

        .. note::

           Documents returned by this method are not guaranteed to be
           returned in the same order that they are given in ``references``.

        .. note::

           If multiple ``references`` refer to the same document, the server
           will only return one result.

        See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`
        for more information on **field paths**.

        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).

        Args:
            references (List[.DocumentReference, ...]): Iterable of document
                references to be retrieved.
            field_paths (Optional[Iterable[str, ...]]): An iterable of field
                paths (``.``-delimited list of field names) to use as a
                projection of document fields in the returned results. If
                no value is provided, all fields will be returned.
            transaction (Optional[~.firestore_v1beta1.transaction.\
                Transaction]): An existing transaction that these
                ``references`` will be retrieved in.

        Yields:
            .DocumentSnapshot: The next document snapshot that fulfills the
            query, or :data:`None` if the document does not exist.
        """
        document_paths, reference_map = _reference_info(references)
        mask = _get_doc_mask(field_paths)
        response_iterator = self._firestore_api.batch_get_documents(
            self._database_string,
            document_paths,
            mask,
            transaction=_helpers.get_transaction_id(transaction),
            metadata=self._rpc_metadata,
        )

        # Stream snapshots as the server responds (not in request order).
        for get_doc_response in response_iterator:
            yield _parse_batch_get(get_doc_response, reference_map, self)

    def collections(self):
        """List top-level collections of the client's database.

        Returns:
            Sequence[~.firestore_v1beta1.collection.CollectionReference]:
                iterator of subcollections of the current document.
        """
        iterator = self._firestore_api.list_collection_ids(
            self._database_string, metadata=self._rpc_metadata
        )
        # Attach the hooks the page iterator needs to convert raw
        # collection IDs into CollectionReference objects.
        iterator.client = self
        iterator.item_to_value = _item_to_collection_ref
        return iterator

    def batch(self):
        """Get a batch instance from this client.

        Returns:
            ~.firestore_v1beta1.batch.WriteBatch: A "write" batch to be
            used for accumulating document changes and sending the changes
            all at once.
        """
        return WriteBatch(self)

    def transaction(self, **kwargs):
        """Get a transaction that uses this client.

        See :class:`~google.cloud.firestore_v1beta1.transaction.Transaction`
        for more information on transactions and the constructor arguments.

        Args:
            kwargs (Dict[str, Any]): The keyword arguments (other than
                ``client``) to pass along to the
                :class:`~google.cloud.firestore_v1beta1.transaction.Transaction`
                constructor.

        Returns:
            ~.firestore_v1beta1.transaction.Transaction: A transaction
            attached to this client.
        """
        return Transaction(self, **kwargs)
def _reference_info(references):
"""Get information about document references.
Helper for :meth:`~google.cloud.firestore_v1beta1.client.Client.get_all`.
Args:
references (List[.DocumentReference, ...]): Iterable of document
references.
Returns:
Tuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of
* fully-qualified documents paths for each reference in ``references``
* a mapping from the paths to the original reference. (If multiple
``references`` contains multiple references to the same document,
that key will be overwritten in the result.)
"""
document_paths = []
reference_map = {}
for reference in references:
doc_path = reference._document_path
document_paths.append(doc_path)
reference_map[doc_path] = reference
return document_paths, reference_map
def _get_reference(document_path, reference_map):
"""Get a document reference from a dictionary.
This just wraps a simple dictionary look-up with a helpful error that is
specific to :meth:`~google.cloud.firestore.client.Client.get_all`, the
**public** caller of this function.
Args:
document_path (str): A fully-qualified document path.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
Returns:
.DocumentReference: The matching reference.
Raises:
ValueError: If ``document_path`` has not been encountered.
"""
try:
return reference_map[document_path]
except KeyError:
msg = _BAD_DOC_TEMPLATE.format(document_path)
raise ValueError(msg)
def _parse_batch_get(get_doc_response, reference_map, client):
"""Parse a `BatchGetDocumentsResponse` protobuf.
Args:
get_doc_response (~google.cloud.proto.firestore.v1beta1.\
firestore_pb2.BatchGetDocumentsResponse): A single response (from
a stream) containing the "get" response for a document.
reference_map (Dict[str, .DocumentReference]): A mapping (produced
by :func:`_reference_info`) of fully-qualified document paths to
document references.
client (~.firestore_v1beta1.client.Client): A client that has
a document factory.
Returns:
[.DocumentSnapshot]: The retrieved snapshot.
Raises:
ValueError: If the response has a ``result`` field (a oneof) other
than ``found`` or ``missing``.
"""
result_type = get_doc_response.WhichOneof("result")
if result_type == "found":
reference = _get_reference(get_doc_response.found.name, reference_map)
data = _helpers.decode_dict(get_doc_response.found.fields, client)
snapshot = DocumentSnapshot(
reference,
data,
exists=True,
read_time=get_doc_response.read_time,
create_time=get_doc_response.found.create_time,
update_time=get_doc_response.found.update_time,
)
elif result_type == "missing":
snapshot = DocumentSnapshot(
None,
None,
exists=False,
read_time=get_doc_response.read_time,
create_time=None,
update_time=None,
)
else:
raise ValueError(
"`BatchGetDocumentsResponse.result` (a oneof) had a field other "
"than `found` or `missing` set, or was unset"
)
return snapshot
def _get_doc_mask(field_paths):
"""Get a document mask if field paths are provided.
Args:
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results.
Returns:
Optional[google.cloud.firestore_v1beta1.types.DocumentMask]: A mask
to project documents to a restricted set of field paths.
"""
if field_paths is None:
return None
else:
return types.DocumentMask(field_paths=field_paths)
def _item_to_collection_ref(iterator, item):
"""Convert collection ID to collection ref.
Args:
iterator (google.api_core.page_iterator.GRPCIterator):
iterator response
item (str): ID of the collection
"""
return iterator.client.collection(item)

View file

@ -0,0 +1,478 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for representing collections for the Google Cloud Firestore API."""
import random
import warnings
import six
from google.cloud.firestore_v1beta1 import _helpers
from google.cloud.firestore_v1beta1 import query as query_mod
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.watch import Watch
from google.cloud.firestore_v1beta1 import document
_AUTO_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
class CollectionReference(object):
"""A reference to a collection in a Firestore database.
The collection may already exist or this class can facilitate creation
of documents within the collection.
Args:
path (Tuple[str, ...]): The components in the collection path.
This is a series of strings representing each collection and
sub-collection ID, as well as the document IDs for any documents
that contain a sub-collection.
kwargs (dict): The keyword arguments for the constructor. The only
supported keyword is ``client`` and it must be a
:class:`~google.cloud.firestore_v1beta1.client.Client` if
provided. It represents the client that created this collection
reference.
Raises:
ValueError: if
* the ``path`` is empty
* there are an even number of elements
* a collection ID in ``path`` is not a string
* a document ID in ``path`` is not a string
TypeError: If a keyword other than ``client`` is used.
"""
def __init__(self, *path, **kwargs):
_helpers.verify_path(path, is_collection=True)
self._path = path
self._client = kwargs.pop("client", None)
if kwargs:
raise TypeError(
"Received unexpected arguments", kwargs, "Only `client` is supported"
)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._path == other._path and self._client == other._client
@property
def id(self):
"""The collection identifier.
Returns:
str: The last component of the path.
"""
return self._path[-1]
@property
def parent(self):
"""Document that owns the current collection.
Returns:
Optional[~.firestore_v1beta1.document.DocumentReference]: The
parent document, if the current collection is not a
top-level collection.
"""
if len(self._path) == 1:
return None
else:
parent_path = self._path[:-1]
return self._client.document(*parent_path)
def document(self, document_id=None):
"""Create a sub-document underneath the current collection.
Args:
document_id (Optional[str]): The document identifier
within the current collection. If not provided, will default
to a random 20 character string composed of digits,
uppercase and lowercase and letters.
Returns:
~.firestore_v1beta1.document.DocumentReference: The child
document.
"""
if document_id is None:
document_id = _auto_id()
child_path = self._path + (document_id,)
return self._client.document(*child_path)
def _parent_info(self):
"""Get fully-qualified parent path and prefix for this collection.
Returns:
Tuple[str, str]: Pair of
* the fully-qualified (with database and project) path to the
parent of this collection (will either be the database path
or a document path).
* the prefix to a document in this collection.
"""
parent_doc = self.parent
if parent_doc is None:
parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join(
(self._client._database_string, "documents")
)
else:
parent_path = parent_doc._document_path
expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id))
return parent_path, expected_prefix
def add(self, document_data, document_id=None):
"""Create a document in the Firestore database with the provided data.
Args:
document_data (dict): Property names and values to use for
creating the document.
document_id (Optional[str]): The document identifier within the
current collection. If not provided, an ID will be
automatically assigned by the server (the assigned ID will be
a random 20 character string composed of digits,
uppercase and lowercase letters).
Returns:
Tuple[google.protobuf.timestamp_pb2.Timestamp, \
~.firestore_v1beta1.document.DocumentReference]: Pair of
* The ``update_time`` when the document was created (or
overwritten).
* A document reference for the created document.
Raises:
~google.cloud.exceptions.Conflict: If ``document_id`` is provided
and the document already exists.
"""
if document_id is None:
parent_path, expected_prefix = self._parent_info()
document_pb = document_pb2.Document()
created_document_pb = self._client._firestore_api.create_document(
parent_path,
collection_id=self.id,
document_id=None,
document=document_pb,
mask=None,
metadata=self._client._rpc_metadata,
)
new_document_id = _helpers.get_doc_id(created_document_pb, expected_prefix)
document_ref = self.document(new_document_id)
set_result = document_ref.set(document_data)
return set_result.update_time, document_ref
else:
document_ref = self.document(document_id)
write_result = document_ref.create(document_data)
return write_result.update_time, document_ref
def list_documents(self, page_size=None):
"""List all subdocuments of the current collection.
Args:
page_size (Optional[int]]): The maximum number of documents
in each page of results from this request. Non-positive values
are ignored. Defaults to a sensible value set by the API.
Returns:
Sequence[~.firestore_v1beta1.collection.DocumentReference]:
iterator of subdocuments of the current collection. If the
collection does not exist at the time of `snapshot`, the
iterator will be empty
"""
parent, _ = self._parent_info()
iterator = self._client._firestore_api.list_documents(
parent,
self.id,
page_size=page_size,
show_missing=True,
metadata=self._client._rpc_metadata,
)
iterator.collection = self
iterator.item_to_value = _item_to_document_ref
return iterator
def select(self, field_paths):
"""Create a "select" query with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.select` for
more information on this method.
Args:
field_paths (Iterable[str, ...]): An iterable of field paths
(``.``-delimited list of field names) to use as a projection
of document fields in the query results.
Returns:
~.firestore_v1beta1.query.Query: A "projected" query.
"""
query = query_mod.Query(self)
return query.select(field_paths)
def where(self, field_path, op_string, value):
"""Create a "where" query with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.where` for
more information on this method.
Args:
field_path (str): A field path (``.``-delimited list of
field names) for the field to filter on.
op_string (str): A comparison operation in the form of a string.
Acceptable values are ``<``, ``<=``, ``==``, ``>=``
and ``>``.
value (Any): The value to compare the field against in the filter.
If ``value`` is :data:`None` or a NaN, then ``==`` is the only
allowed operation.
Returns:
~.firestore_v1beta1.query.Query: A filtered query.
"""
query = query_mod.Query(self)
return query.where(field_path, op_string, value)
def order_by(self, field_path, **kwargs):
"""Create an "order by" query with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.order_by` for
more information on this method.
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
kwargs (Dict[str, Any]): The keyword arguments to pass along
to the query. The only supported keyword is ``direction``, see
:meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`
for more information.
Returns:
~.firestore_v1beta1.query.Query: An "order by" query.
"""
query = query_mod.Query(self)
return query.order_by(field_path, **kwargs)
def limit(self, count):
"""Create a limited query with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.limit` for
more information on this method.
Args:
count (int): Maximum number of documents to return that match
the query.
Returns:
~.firestore_v1beta1.query.Query: A limited query.
"""
query = query_mod.Query(self)
return query.limit(count)
def offset(self, num_to_skip):
"""Skip to an offset in a query with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.offset` for
more information on this method.
Args:
num_to_skip (int): The number of results to skip at the beginning
of query results. (Must be non-negative.)
Returns:
~.firestore_v1beta1.query.Query: An offset query.
"""
query = query_mod.Query(self)
return query.offset(num_to_skip)
def start_at(self, document_fields):
"""Start query at a cursor with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.start_at` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.start_at(document_fields)
def start_after(self, document_fields):
"""Start query after a cursor with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.start_after` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.start_after(document_fields)
def end_before(self, document_fields):
"""End query before a cursor with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.end_before` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.end_before(document_fields)
def end_at(self, document_fields):
"""End query at a cursor with this collection as parent.
See
:meth:`~google.cloud.firestore_v1beta1.query.Query.end_at` for
more information on this method.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor.
"""
query = query_mod.Query(self)
return query.end_at(document_fields)
def get(self, transaction=None):
"""Deprecated alias for :meth:`stream`."""
warnings.warn(
"'Collection.get' is deprecated: please use 'Collection.stream' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.stream(transaction=transaction)
def stream(self, transaction=None):
    """Read the documents in this collection.

    This sends a ``RunQuery`` RPC and then returns an iterator which
    consumes each document returned in the stream of
    ``RunQueryResponse`` messages.

    .. note::

        The underlying stream of responses will time out after
        the ``max_rpc_timeout_millis`` value set in the GAPIC
        client configuration for the ``RunQuery`` API. Snapshots
        not consumed from the iterator before that point will be lost.

    If a ``transaction`` is used and it already has write operations
    added, this method cannot be used (i.e. read-after-write is not
    allowed).

    Args:
        transaction (Optional[~.firestore_v1beta1.transaction.\
            Transaction]): An existing transaction that the query will
            run in.

    Yields:
        ~.firestore_v1beta1.document.DocumentSnapshot: The next
        document that fulfills the query.
    """
    # Streaming a bare collection is just streaming the unfiltered query
    # rooted at it.
    return query_mod.Query(self).stream(transaction=transaction)
def on_snapshot(self, callback):
    """Monitor the documents in this collection.

    This starts a watch on this collection using a background thread. The
    provided callback is run on the snapshot of the documents.

    Args:
        callback(~.firestore.collection.CollectionSnapshot): a callback
            to run when a change occurs.

    Example:
        from google.cloud import firestore_v1beta1

        db = firestore_v1beta1.Client()
        collection_ref = db.collection(u'users')

        def on_snapshot(collection_snapshot):
            for doc in collection_snapshot.documents:
                print(u'{} => {}'.format(doc.id, doc.to_dict()))

        # Watch this collection
        collection_watch = collection_ref.on_snapshot(on_snapshot)

        # Terminate this watch
        collection_watch.unsubscribe()
    """
    # Watching a collection is implemented as watching the unfiltered
    # query rooted at it.
    return Watch.for_query(
        query_mod.Query(self),
        callback,
        document.DocumentSnapshot,
        document.DocumentReference,
    )
def _auto_id():
    """Generate a "random" automatically generated document ID.

    Returns:
        str: A 20 character string composed of digits and uppercase and
        lowercase letters.
    """
    # NOTE: uses ``random`` (not ``secrets``) — IDs need uniqueness, not
    # cryptographic strength.
    return "".join(random.choice(_AUTO_ID_CHARS) for _ in six.moves.xrange(20))
def _item_to_document_ref(iterator, item):
    """Convert a Document resource to a document reference.

    Args:
        iterator (google.api_core.page_iterator.GRPCIterator):
            iterator response
        item (dict): document resource

    Returns:
        ~.firestore_v1beta1.document.DocumentReference: A reference to the
        document named by the resource.
    """
    # The last path segment of the resource name is the document ID;
    # everything before it is the parent collection path.
    document_id = item.name.split(_helpers.DOCUMENT_PATH_DELIMITER)[-1]
    return iterator.collection.document(document_id)

View file

@ -0,0 +1,780 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for representing documents for the Google Cloud Firestore API."""
import copy
import six
from google.api_core import exceptions
from google.cloud.firestore_v1beta1 import _helpers
from google.cloud.firestore_v1beta1 import field_path as field_path_module
from google.cloud.firestore_v1beta1.proto import common_pb2
from google.cloud.firestore_v1beta1.watch import Watch
class DocumentReference(object):
    """A reference to a document in a Firestore database.

    The document may already exist or can be created by this class.

    Args:
        path (Tuple[str, ...]): The components in the document path.
            This is a series of strings representing each collection and
            sub-collection ID, as well as the document IDs for any documents
            that contain a sub-collection (as well as the base document).
        kwargs (dict): The keyword arguments for the constructor. The only
            supported keyword is ``client`` and it must be a
            :class:`~google.cloud.firestore_v1beta1.client.Client`.
            It represents the client that created this document reference.

    Raises:
        ValueError: if

            * the ``path`` is empty
            * there are an even number of elements
            * a collection ID in ``path`` is not a string
            * a document ID in ``path`` is not a string
        TypeError: If a keyword other than ``client`` is used.
    """

    # Cached fully-qualified path; computed lazily by ``_document_path``.
    _document_path_internal = None

    def __init__(self, *path, **kwargs):
        # Raises ValueError on a malformed document path (see class docs).
        _helpers.verify_path(path, is_collection=False)
        self._path = path
        self._client = kwargs.pop("client", None)
        if kwargs:
            raise TypeError(
                "Received unexpected arguments", kwargs, "Only `client` is supported"
            )
def __copy__(self):
    """Shallow copy the instance.

    We leave the client "as-is" but tuple-unpack the path.

    Returns:
        .DocumentReference: A copy of the current document.
    """
    result = self.__class__(*self._path, client=self._client)
    # Carry over the cached full path so the copy need not recompute it.
    result._document_path_internal = self._document_path_internal
    return result

def __deepcopy__(self, unused_memo):
    """Deep copy the instance.

    This isn't a true deep copy, we leave the client "as-is" but
    tuple-unpack the path.

    Returns:
        .DocumentReference: A copy of the current document.
    """
    return self.__copy__()
def __eq__(self, other):
    """Equality check against another instance.

    Args:
        other (Any): A value to compare against.

    Returns:
        Union[bool, NotImplementedType]: Indicating if the values are
        equal.
    """
    if isinstance(other, DocumentReference):
        # Two references are equal when they name the same document on
        # the same client.
        return self._client == other._client and self._path == other._path
    else:
        return NotImplemented

def __hash__(self):
    # Must stay consistent with __eq__: both path and client participate.
    return hash(self._path) + hash(self._client)

def __ne__(self, other):
    """Inequality check against another instance.

    Args:
        other (Any): A value to compare against.

    Returns:
        Union[bool, NotImplementedType]: Indicating if the values are
        not equal.
    """
    if isinstance(other, DocumentReference):
        return self._client != other._client or self._path != other._path
    else:
        return NotImplemented
@property
def path(self):
    """Database-relative path for this document.

    Returns:
        str: The document's relative path.
    """
    return "/".join(self._path)

@property
def _document_path(self):
    """Create and cache the full path for this document.

    Of the form:

        ``projects/{project_id}/databases/{database_id}/...
          documents/{document_path}``

    Returns:
        str: The full document path.

    Raises:
        ValueError: If the current document reference has no ``client``.
    """
    # Lazily computed once and memoized on the instance.
    if self._document_path_internal is None:
        if self._client is None:
            raise ValueError("A document reference requires a `client`.")
        self._document_path_internal = _get_document_path(self._client, self._path)
    return self._document_path_internal

@property
def id(self):
    """The document identifier (within its collection).

    Returns:
        str: The last component of the path.
    """
    return self._path[-1]
@property
def parent(self):
    """Collection that owns the current document.

    Returns:
        ~.firestore_v1beta1.collection.CollectionReference: The
        parent collection.
    """
    # Drop the trailing document ID to get the collection path.
    parent_path = self._path[:-1]
    return self._client.collection(*parent_path)

def collection(self, collection_id):
    """Create a sub-collection underneath the current document.

    Args:
        collection_id (str): The sub-collection identifier (sometimes
            referred to as the "kind").

    Returns:
        ~.firestore_v1beta1.collection.CollectionReference: The
        child collection.
    """
    child_path = self._path + (collection_id,)
    return self._client.collection(*child_path)
def create(self, document_data):
    """Create the current document in the Firestore database.

    Args:
        document_data (dict): Property names and values to use for
            creating a document.

    Returns:
        google.cloud.firestore_v1beta1.types.WriteResult: The
        write result corresponding to the committed document. A write
        result contains an ``update_time`` field.

    Raises:
        ~google.cloud.exceptions.Conflict: If the document already exists.
    """
    # Implemented as a single-operation batch commit.
    batch = self._client.batch()
    batch.create(self, document_data)
    write_results = batch.commit()
    return _first_write_result(write_results)

def set(self, document_data, merge=False):
    """Replace the current document in the Firestore database.

    A write ``option`` can be specified to indicate preconditions of
    the "set" operation. If no ``option`` is specified and this document
    doesn't exist yet, this method will create it.

    Overwrites all content for the document with the fields in
    ``document_data``. This method performs almost the same functionality
    as :meth:`create`. The only difference is that this method doesn't
    make any requirements on the existence of the document (unless
    ``option`` is used), whereas as :meth:`create` will fail if the
    document already exists.

    Args:
        document_data (dict): Property names and values to use for
            replacing a document.
        merge (Optional[bool] or Optional[List<apispec>]):
            If True, apply merging instead of overwriting the state
            of the document.

    Returns:
        google.cloud.firestore_v1beta1.types.WriteResult: The
        write result corresponding to the committed document. A write
        result contains an ``update_time`` field.
    """
    # Implemented as a single-operation batch commit.
    batch = self._client.batch()
    batch.set(self, document_data, merge=merge)
    write_results = batch.commit()
    return _first_write_result(write_results)
def update(self, field_updates, option=None):
    """Update an existing document in the Firestore database.

    By default, this method verifies that the document exists on the
    server before making updates. A write ``option`` can be specified to
    override these preconditions.

    Each key in ``field_updates`` can either be a field name or a
    **field path** (For more information on **field paths**, see
    :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`.) To
    illustrate this, consider a document with

    .. code-block:: python

        >>> snapshot = document.get()
        >>> snapshot.to_dict()
        {
            'foo': {
                'bar': 'baz',
            },
            'other': True,
        }

    stored on the server. If the field name is used in the update:

    .. code-block:: python

        >>> field_updates = {
        ...     'foo': {
        ...         'quux': 800,
        ...     },
        ... }
        >>> document.update(field_updates)

    then all of ``foo`` will be overwritten on the server and the new
    value will be

    .. code-block:: python

        >>> snapshot = document.get()
        >>> snapshot.to_dict()
        {
            'foo': {
                'quux': 800,
            },
            'other': True,
        }

    On the other hand, if a ``.``-delimited **field path** is used in the
    update:

    .. code-block:: python

        >>> field_updates = {
        ...     'foo.quux': 800,
        ... }
        >>> document.update(field_updates)

    then only ``foo.quux`` will be updated on the server and the
    field ``foo.bar`` will remain intact:

    .. code-block:: python

        >>> snapshot = document.get()
        >>> snapshot.to_dict()
        {
            'foo': {
                'bar': 'baz',
                'quux': 800,
            },
            'other': True,
        }

    .. warning::

        A **field path** can only be used as a top-level key in
        ``field_updates``.

    To delete / remove a field from an existing document, use the
    :attr:`~google.cloud.firestore_v1beta1.transforms.DELETE_FIELD`
    sentinel. So with the example above, sending

    .. code-block:: python

        >>> field_updates = {
        ...     'other': firestore.DELETE_FIELD,
        ... }
        >>> document.update(field_updates)

    would update the value on the server to:

    .. code-block:: python

        >>> snapshot = document.get()
        >>> snapshot.to_dict()
        {
            'foo': {
                'bar': 'baz',
            },
        }

    To set a field to the current time on the server when the
    update is received, use the
    :attr:`~google.cloud.firestore_v1beta1.transforms.SERVER_TIMESTAMP`
    sentinel. Sending

    .. code-block:: python

        >>> field_updates = {
        ...     'foo.now': firestore.SERVER_TIMESTAMP,
        ... }
        >>> document.update(field_updates)

    would update the value on the server to:

    .. code-block:: python

        >>> snapshot = document.get()
        >>> snapshot.to_dict()
        {
            'foo': {
                'bar': 'baz',
                'now': datetime.datetime(2012, ...),
            },
            'other': True,
        }

    Args:
        field_updates (dict): Field names or paths to update and values
            to update with.
        option (Optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the server
            state of the document before applying changes.

    Returns:
        google.cloud.firestore_v1beta1.types.WriteResult: The
        write result corresponding to the updated document. A write
        result contains an ``update_time`` field.

    Raises:
        ~google.cloud.exceptions.NotFound: If the document does not exist.
    """
    # Implemented as a single-operation batch commit.
    batch = self._client.batch()
    batch.update(self, field_updates, option=option)
    write_results = batch.commit()
    return _first_write_result(write_results)
def delete(self, option=None):
    """Delete the current document in the Firestore database.

    Args:
        option (Optional[~.firestore_v1beta1.client.WriteOption]): A
            write option to make assertions / preconditions on the server
            state of the document before applying changes.

    Returns:
        google.protobuf.timestamp_pb2.Timestamp: The time that the delete
        request was received by the server. If the document did not exist
        when the delete was sent (i.e. nothing was deleted), this method
        will still succeed and will still return the time that the
        request was received by the server.
    """
    # Unlike create/set/update, this commits the write protobuf directly
    # rather than going through a WriteBatch.
    write_pb = _helpers.pb_for_delete(self._document_path, option)
    commit_response = self._client._firestore_api.commit(
        self._client._database_string,
        [write_pb],
        transaction=None,
        metadata=self._client._rpc_metadata,
    )
    return commit_response.commit_time
def get(self, field_paths=None, transaction=None):
    """Retrieve a snapshot of the current document.

    See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`
    for more information on **field paths**.

    If a ``transaction`` is used and it already has write operations
    added, this method cannot be used (i.e. read-after-write is not
    allowed).

    Args:
        field_paths (Optional[Iterable[str, ...]]): An iterable of field
            paths (``.``-delimited list of field names) to use as a
            projection of document fields in the returned results. If
            no value is provided, all fields will be returned.
        transaction (Optional[~.firestore_v1beta1.transaction.\
            Transaction]): An existing transaction that this reference
            will be retrieved in.

    Returns:
        ~.firestore_v1beta1.document.DocumentSnapshot: A snapshot of
        the current document. If the document does not exist at
        the time of `snapshot`, the snapshot `reference`, `data`,
        `update_time`, and `create_time` attributes will all be
        `None` and `exists` will be `False`.
    """
    # A bare string is a common caller mistake (it is iterable, so it
    # would otherwise be treated as a sequence of 1-char paths).
    if isinstance(field_paths, six.string_types):
        raise ValueError("'field_paths' must be a sequence of paths, not a string.")
    if field_paths is not None:
        mask = common_pb2.DocumentMask(field_paths=sorted(field_paths))
    else:
        mask = None
    firestore_api = self._client._firestore_api
    try:
        document_pb = firestore_api.get_document(
            self._document_path,
            mask=mask,
            transaction=_helpers.get_transaction_id(transaction),
            metadata=self._client._rpc_metadata,
        )
    except exceptions.NotFound:
        # A missing document is reported via an "empty" snapshot, not an
        # exception.
        data = None
        exists = False
        create_time = None
        update_time = None
    else:
        data = _helpers.decode_dict(document_pb.fields, self._client)
        exists = True
        create_time = document_pb.create_time
        update_time = document_pb.update_time
    return DocumentSnapshot(
        reference=self,
        data=data,
        exists=exists,
        read_time=None,  # No server read_time available
        create_time=create_time,
        update_time=update_time,
    )
def collections(self, page_size=None):
    """List subcollections of the current document.

    Args:
        page_size (Optional[int]]): The maximum number of collections
            in each page of results from this request. Non-positive values
            are ignored. Defaults to a sensible value set by the API.

    Returns:
        Sequence[~.firestore_v1beta1.collection.CollectionReference]:
        iterator of subcollections of the current document. If the
        document does not exist at the time of `snapshot`, the
        iterator will be empty
    """
    iterator = self._client._firestore_api.list_collection_ids(
        self._document_path,
        page_size=page_size,
        metadata=self._client._rpc_metadata,
    )
    # Attach context so _item_to_collection_ref can build child refs.
    iterator.document = self
    iterator.item_to_value = _item_to_collection_ref
    return iterator
def on_snapshot(self, callback):
    """Watch this document.

    This starts a watch on this document using a background thread. The
    provided callback is run on the snapshot.

    Args:
        callback(~.firestore.document.DocumentSnapshot): a callback to run
            when a change occurs

    Example:
        from google.cloud import firestore_v1beta1

        db = firestore_v1beta1.Client()
        collection_ref = db.collection(u'users')

        def on_snapshot(document_snapshot):
            doc = document_snapshot
            print(u'{} => {}'.format(doc.id, doc.to_dict()))

        doc_ref = db.collection(u'users').document(
            u'alovelace' + unique_resource_id())

        # Watch this document
        doc_watch = doc_ref.on_snapshot(on_snapshot)

        # Terminate this watch
        doc_watch.unsubscribe()
    """
    return Watch.for_document(self, callback, DocumentSnapshot, DocumentReference)
class DocumentSnapshot(object):
    """A snapshot of document data in a Firestore database.

    This represents data retrieved at a specific time and may not contain
    all fields stored for the document (i.e. a hand-picked selection of
    fields may have been retrieved).

    Instances of this class are not intended to be constructed by hand,
    rather they'll be returned as responses to various methods, such as
    :meth:`~google.cloud.DocumentReference.get`.

    Args:
        reference (~.firestore_v1beta1.document.DocumentReference): A
            document reference corresponding to the document that contains
            the data in this snapshot.
        data (Dict[str, Any]): The data retrieved in the snapshot.
        exists (bool): Indicates if the document existed at the time the
            snapshot was retrieved.
        read_time (google.protobuf.timestamp_pb2.Timestamp): The time that
            this snapshot was read from the server.
        create_time (google.protobuf.timestamp_pb2.Timestamp): The time that
            this document was created.
        update_time (google.protobuf.timestamp_pb2.Timestamp): The time that
            this document was last updated.
    """

    def __init__(self, reference, data, exists, read_time, create_time, update_time):
        self._reference = reference
        # We want immutable data, so callers can't modify this value
        # out from under us.
        self._data = copy.deepcopy(data)
        self._exists = exists
        self.read_time = read_time
        """google.protobuf.timestamp_pb2.Timestamp: Time snapshot was read."""
        self.create_time = create_time
        """google.protobuf.timestamp_pb2.Timestamp: Document's creation."""
        self.update_time = update_time
        """google.protobuf.timestamp_pb2.Timestamp: Document's last update."""

    def __eq__(self, other):
        # Snapshots are equal when they point at the same document and
        # carry the same data; timestamps do not participate.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._reference == other._reference and self._data == other._data

    def __hash__(self):
        # NOTE(review): hash includes update_time while __eq__ does not —
        # equal snapshots with different update times hash differently.
        seconds = self.update_time.seconds
        nanos = self.update_time.nanos
        return hash(self._reference) + hash(seconds) + hash(nanos)
@property
def _client(self):
    """The client that owns the document reference for this snapshot.

    Returns:
        ~.firestore_v1beta1.client.Client: The client that owns this
        document.
    """
    return self._reference._client

@property
def exists(self):
    """Existence flag.

    Indicates if the document existed at the time this snapshot
    was retrieved.

    Returns:
        bool: The existence flag.
    """
    return self._exists

@property
def id(self):
    """The document identifier (within its collection).

    Returns:
        str: The last component of the path of the document.
    """
    return self._reference.id

@property
def reference(self):
    """Document reference corresponding to document that owns this data.

    Returns:
        ~.firestore_v1beta1.document.DocumentReference: A document
        reference corresponding to this document.
    """
    return self._reference
def get(self, field_path):
    """Get a value from the snapshot data.

    If the data is nested, for example:

    .. code-block:: python

        >>> snapshot.to_dict()
        {
            'top1': {
                'middle2': {
                    'bottom3': 20,
                    'bottom4': 22,
                },
                'middle5': True,
            },
            'top6': b'\x00\x01 foo',
        }

    a **field path** can be used to access the nested data. For
    example:

    .. code-block:: python

        >>> snapshot.get('top1')
        {
            'middle2': {
                'bottom3': 20,
                'bottom4': 22,
            },
            'middle5': True,
        }
        >>> snapshot.get('top1.middle2')
        {
            'bottom3': 20,
            'bottom4': 22,
        }
        >>> snapshot.get('top1.middle2.bottom3')
        20

    See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`
    for more information on **field paths**.

    A copy is returned since the data may contain mutable values,
    but the data stored in the snapshot must remain immutable.

    Args:
        field_path (str): A field path (``.``-delimited list of
            field names).

    Returns:
        Any or None:
            (A copy of) the value stored for the ``field_path`` or
            None if snapshot document does not exist.

    Raises:
        KeyError: If the ``field_path`` does not match nested data
            in the snapshot.
    """
    if not self._exists:
        return None
    nested_data = field_path_module.get_nested_value(field_path, self._data)
    # Deep copy keeps the snapshot's internal data immutable.
    return copy.deepcopy(nested_data)
def to_dict(self):
    """Retrieve the data contained in this snapshot.

    A copy is returned since the data may contain mutable values,
    but the data stored in the snapshot must remain immutable.

    Returns:
        Dict[str, Any] or None:
            The data in the snapshot. Returns None if reference
            does not exist.
    """
    if not self._exists:
        return None
    # Deep copy keeps the snapshot's internal data immutable.
    return copy.deepcopy(self._data)
def _get_document_path(client, path):
    """Convert a path tuple into a full path string.

    Of the form:

        ``projects/{project_id}/databases/{database_id}/...
          documents/{document_path}``

    Args:
        client (~.firestore_v1beta1.client.Client): The client that holds
            configuration details and a GAPIC client object.
        path (Tuple[str, ...]): The components in a document path.

    Returns:
        str: The fully-qualified document path.
    """
    # Prefix the relative path with the database name and the
    # ``documents`` resource segment.
    components = (client._database_string, "documents") + path
    return _helpers.DOCUMENT_PATH_DELIMITER.join(components)
def _consume_single_get(response_iterator):
"""Consume a gRPC stream that should contain a single response.
The stream will correspond to a ``BatchGetDocuments`` request made
for a single document.
Args:
response_iterator (~google.cloud.exceptions.GrpcRendezvous): A
streaming iterator returned from a ``BatchGetDocuments``
request.
Returns:
~google.cloud.proto.firestore.v1beta1.\
firestore_pb2.BatchGetDocumentsResponse: The single "get"
response in the batch.
Raises:
ValueError: If anything other than exactly one response is returned.
"""
# Calling ``list()`` consumes the entire iterator.
all_responses = list(response_iterator)
if len(all_responses) != 1:
raise ValueError(
"Unexpected response from `BatchGetDocumentsResponse`",
all_responses,
"Expected only one result",
)
return all_responses[0]
def _first_write_result(write_results):
"""Get first write result from list.
For cases where ``len(write_results) > 1``, this assumes the writes
occurred at the same time (e.g. if an update and transform are sent
at the same time).
Args:
write_results (List[google.cloud.proto.firestore.v1beta1.\
write_pb2.WriteResult, ...]: The write results from a
``CommitResponse``.
Returns:
google.cloud.firestore_v1beta1.types.WriteResult: The
lone write result from ``write_results``.
Raises:
ValueError: If there are zero write results. This is likely to
**never** occur, since the backend should be stable.
"""
if not write_results:
raise ValueError("Expected at least one write result")
return write_results[0]
def _item_to_collection_ref(iterator, item):
    """Convert a collection ID to a collection reference.

    Args:
        iterator (google.api_core.page_iterator.GRPCIterator):
            iterator response
        item (str): ID of the collection

    Returns:
        ~.firestore_v1beta1.collection.CollectionReference: A child
        collection of ``iterator.document``.
    """
    # ``iterator.document`` is attached by DocumentReference.collections.
    return iterator.document.collection(item)

View file

@ -0,0 +1,386 @@
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for managing / converting field paths to / from strings."""
try:
from collections import abc as collections_abc
except ImportError: # Python 2.7
import collections as collections_abc
import re
import six
# Error-message templates used by get_nested_value.
_FIELD_PATH_MISSING_TOP = "{!r} is not contained in the data"
_FIELD_PATH_MISSING_KEY = "{!r} is not contained in the data for the key {!r}"
_FIELD_PATH_WRONG_TYPE = (
    "The data at {!r} is not a dictionary, so it cannot contain the key {!r}"
)
# Field-path syntax constants: elements are joined with dots; non-simple
# element names are wrapped in backticks with backslash escaping.
_FIELD_PATH_DELIMITER = "."
_BACKSLASH = "\\"
_ESCAPED_BACKSLASH = _BACKSLASH * 2
_BACKTICK = "`"
_ESCAPED_BACKTICK = _BACKSLASH + _BACKTICK
# A "simple" field name needs no quoting (identifier-like).
_SIMPLE_FIELD_NAME = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*$")
_LEADING_ALPHA_INVALID = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*[^_a-zA-Z0-9]")
# Token alternatives for the field-path lexer (_tokenize_field_path);
# each pair is (group name, pattern).
PATH_ELEMENT_TOKENS = [
    ("SIMPLE", r"[_a-zA-Z][_a-zA-Z0-9]*"),  # unquoted elements
    ("QUOTED", r"`(?:\\`|[^`])*?`"),  # quoted elements, unquoted
    ("DOT", r"\."),  # separator
]
TOKENS_PATTERN = "|".join("(?P<{}>{})".format(*pair) for pair in PATH_ELEMENT_TOKENS)
TOKENS_REGEX = re.compile(TOKENS_PATTERN)
def _tokenize_field_path(path):
    """Lex a field path into tokens (including dots).

    Args:
        path (str): field path to be lexed.

    Yields:
        str: the next token (a simple element, a backtick-quoted element,
        or a ``.`` separator).

    Raises:
        ValueError: If any part of ``path`` cannot be tokenized.
    """
    pos = 0
    get_token = TOKENS_REGEX.match
    match = get_token(path)
    while match is not None:
        # ``lastgroup`` names which alternative matched (SIMPLE/QUOTED/DOT).
        type_ = match.lastgroup
        value = match.group(type_)
        yield value
        pos = match.end()
        match = get_token(path, pos)
    # Anything left over after the last match is unlexable input.
    if pos != len(path):
        raise ValueError("Path {} not consumed, residue: {}".format(path, path[pos:]))
def split_field_path(path):
    """Split a field path into valid elements (without dots).

    Args:
        path (str): field path to be lexed.

    Returns:
        List(str): tokens

    Raises:
        ValueError: if the path does not match the elements-interspersed-
            with-dots pattern.
    """
    if not path:
        return []
    elements = []
    # Two-state machine: tokens must strictly alternate element / dot,
    # starting and ending with an element.
    want_dot = False
    for element in _tokenize_field_path(path):
        if want_dot:
            if element != ".":
                raise ValueError("Invalid path: {}".format(path))
            else:
                want_dot = False
        else:
            if element == ".":
                raise ValueError("Invalid path: {}".format(path))
            elements.append(element)
            want_dot = True
    # Ending while expecting an element means a trailing dot (or an empty
    # tokenization) — both invalid.
    if not want_dot or not elements:
        raise ValueError("Invalid path: {}".format(path))
    return elements
def parse_field_path(api_repr):
    """Parse a **field path** into a list of nested field names.

    See :func:`field_path` for more on **field paths**.

    Args:
        api_repr (str):
            The unique Firestore api representation which consists of
            either simple or UTF-8 field names. It cannot exceed
            1500 bytes, and cannot be empty. Simple field names match
            ``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are
            escaped by surrounding them with backticks.

    Returns:
        List[str, ...]: The list of field names in the field path.
    """
    # code dredged back up from
    # https://github.com/googleapis/google-cloud-python/pull/5109/files
    field_names = []
    for field_name in split_field_path(api_repr):
        # non-simple field name: strip the backticks and un-escape
        # embedded backticks and backslashes.
        if field_name[0] == "`" and field_name[-1] == "`":
            field_name = field_name[1:-1]
            field_name = field_name.replace(_ESCAPED_BACKTICK, _BACKTICK)
            field_name = field_name.replace(_ESCAPED_BACKSLASH, _BACKSLASH)
        field_names.append(field_name)
    return field_names
def render_field_path(field_names):
    """Create a **field path** from a list of nested field names.

    A **field path** is a ``.``-delimited concatenation of the field
    names. It is used to represent a nested field. For example,
    in the data

    .. code-block: python

        data = {
            'aa': {
                'bb': {
                    'cc': 10,
                },
            },
        }

    the field path ``'aa.bb.cc'`` represents that data stored in
    ``data['aa']['bb']['cc']``.

    Args:
        field_names (Iterable[str, ...]): The list of field names.

    Returns:
        str: The ``.``-delimited field path.
    """
    rendered = []
    for name in field_names:
        match = _SIMPLE_FIELD_NAME.match(name)
        if match and match.group(0) == name:
            # Identifier-like names pass through unquoted.
            rendered.append(name)
        else:
            # Everything else is backtick-quoted, with backslashes and
            # backticks escaped.
            escaped = name.replace(_BACKSLASH, _ESCAPED_BACKSLASH)
            escaped = escaped.replace(_BACKTICK, _ESCAPED_BACKTICK)
            rendered.append(_BACKTICK + escaped + _BACKTICK)
    return _FIELD_PATH_DELIMITER.join(rendered)


get_field_path = render_field_path  # backward-compatibility
def get_nested_value(field_path, data):
    """Get a (potentially nested) value from a dictionary.

    If the data is nested, for example:

    .. code-block:: python

        >>> data
        {
            'top1': {
                'middle2': {
                    'bottom3': 20,
                    'bottom4': 22,
                },
                'middle5': True,
            },
            'top6': b'\x00\x01 foo',
        }

    a **field path** can be used to access the nested data. For
    example:

    .. code-block:: python

        >>> get_nested_value('top1', data)
        {
            'middle2': {
                'bottom3': 20,
                'bottom4': 22,
            },
            'middle5': True,
        }
        >>> get_nested_value('top1.middle2', data)
        {
            'bottom3': 20,
            'bottom4': 22,
        }
        >>> get_nested_value('top1.middle2.bottom3', data)
        20

    See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path` for
    more information on **field paths**.

    Args:
        field_path (str): A field path (``.``-delimited list of
            field names).
        data (Dict[str, Any]): The (possibly nested) data.

    Returns:
        Any: (A copy of) the value stored for the ``field_path``.

    Raises:
        KeyError: If the ``field_path`` does not match nested data.
    """
    field_names = parse_field_path(field_path)
    nested_data = data
    # Walk the path one element at a time, producing an error message that
    # names the exact failing element and the partial path reached.
    for index, field_name in enumerate(field_names):
        if isinstance(nested_data, collections_abc.Mapping):
            if field_name in nested_data:
                nested_data = nested_data[field_name]
            else:
                if index == 0:
                    msg = _FIELD_PATH_MISSING_TOP.format(field_name)
                    raise KeyError(msg)
                else:
                    partial = render_field_path(field_names[:index])
                    msg = _FIELD_PATH_MISSING_KEY.format(field_name, partial)
                    raise KeyError(msg)
        else:
            # The path descended into a non-mapping value.
            partial = render_field_path(field_names[:index])
            msg = _FIELD_PATH_WRONG_TYPE.format(partial, field_name)
            raise KeyError(msg)
    return nested_data
class FieldPath(object):
"""Field Path object for client use.
A field path is a sequence of element keys, separated by periods.
Each element key can be either a simple identifier, or a full unicode
string.
In the string representation of a field path, non-identifier elements
must be quoted using backticks, with internal backticks and backslashes
escaped with a backslash.
Args:
parts: (one or more strings)
Indicating path of the key to be used.
"""
def __init__(self, *parts):
for part in parts:
if not isinstance(part, six.string_types) or not part:
error = "One or more components is not a string or is empty."
raise ValueError(error)
self.parts = tuple(parts)
@classmethod
def from_api_repr(cls, api_repr):
"""Factory: create a FieldPath from the string formatted per the API.
Args:
api_repr (str): a string path, with non-identifier elements quoted
It cannot exceed 1500 characters, and cannot be empty.
Returns:
(:class:`FieldPath`) An instance parsed from ``api_repr``.
Raises:
ValueError if the parsing fails
"""
api_repr = api_repr.strip()
if not api_repr:
raise ValueError("Field path API representation cannot be empty.")
return cls(*parse_field_path(api_repr))
@classmethod
def from_string(cls, path_string):
"""Factory: create a FieldPath from a unicode string representation.
This method splits on the character `.` and disallows the
characters `~*/[]`. To create a FieldPath whose components have
those characters, call the constructor.
Args:
path_string (str): A unicode string which cannot contain
`~*/[]` characters, cannot exceed 1500 bytes, and cannot be empty.
Returns:
(:class:`FieldPath`) An instance parsed from ``path_string``.
"""
try:
return cls.from_api_repr(path_string)
except ValueError:
elements = path_string.split(".")
for element in elements:
if not element:
raise ValueError("Empty element")
if _LEADING_ALPHA_INVALID.match(element):
raise ValueError(
"Non-alphanum char in element with leading alpha: {}".format(
element
)
)
return FieldPath(*elements)
def __repr__(self):
paths = ""
for part in self.parts:
paths += "'" + part + "',"
paths = paths[:-1]
return "FieldPath({})".format(paths)
def __hash__(self):
return hash(self.to_api_repr())
def __eq__(self, other):
if isinstance(other, FieldPath):
return self.parts == other.parts
return NotImplemented
def __lt__(self, other):
if isinstance(other, FieldPath):
return self.parts < other.parts
return NotImplemented
def __add__(self, other):
"""Adds `other` field path to end of this field path.
Args:
other (~google.cloud.firestore_v1beta1._helpers.FieldPath, str):
The field path to add to the end of this `FieldPath`.
"""
if isinstance(other, FieldPath):
parts = self.parts + other.parts
return FieldPath(*parts)
elif isinstance(other, six.string_types):
parts = self.parts + FieldPath.from_string(other).parts
return FieldPath(*parts)
else:
return NotImplemented
def to_api_repr(self):
"""Render a quoted string representation of the FieldPath
Returns:
(str) Quoted string representation of the path stored
within this FieldPath.
"""
return render_field_path(self.parts)
def eq_or_parent(self, other):
    """Check whether ``other`` is an ancestor.

    Returns:
        (bool) True IFF ``other`` is an ancestor or equal to ``self``,
        else False.
    """
    # Truncating each tuple to the other's length reduces to comparing
    # the common-length prefixes of the two paths.
    prefix_len = min(len(self.parts), len(other.parts))
    return self.parts[:prefix_len] == other.parts[:prefix_len]
def lineage(self):
    """Return field paths for all parents.

    Returns: Set[:class:`FieldPath`]
    """
    # Strict prefixes only: the full path itself is excluded, and an
    # empty or single-part path yields an empty set.
    return {
        FieldPath(*self.parts[:length]) for length in range(1, len(self.parts))
    }

View file

@ -0,0 +1,156 @@
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class NullValue(enum.IntEnum):
    """Singleton enumeration representing the null value for the
    ``Value`` type union.

    The JSON representation for ``NullValue`` is JSON ``null``.

    Attributes:
        NULL_VALUE (int): Null value.
    """

    NULL_VALUE = 0
class DocumentTransform(object):
    """Namespace holding enums nested under the ``DocumentTransform`` message."""

    class FieldTransform(object):
        """Namespace holding per-field transform enums."""

        class ServerValue(enum.IntEnum):
            """A value that is calculated by the server.

            Attributes:
                SERVER_VALUE_UNSPECIFIED (int): Unspecified. This value must not
                    be used.
                REQUEST_TIME (int): The time at which the server processed the
                    request, with millisecond precision.
            """

            SERVER_VALUE_UNSPECIFIED = 0
            REQUEST_TIME = 1
class StructuredQuery(object):
    """Namespace holding enums nested under the ``StructuredQuery`` message."""

    class Direction(enum.IntEnum):
        """A sort direction.

        Attributes:
            DIRECTION_UNSPECIFIED (int): Unspecified.
            ASCENDING (int): Ascending.
            DESCENDING (int): Descending.
        """

        DIRECTION_UNSPECIFIED = 0
        ASCENDING = 1
        DESCENDING = 2

    class CompositeFilter(object):
        """Namespace holding the composite-filter operator enum."""

        class Operator(enum.IntEnum):
            """A composite filter operator.

            Attributes:
                OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be
                    used.
                AND (int): The results are required to satisfy each of the
                    combined filters.
            """

            OPERATOR_UNSPECIFIED = 0
            AND = 1

    class FieldFilter(object):
        """Namespace holding the field-filter operator enum."""

        class Operator(enum.IntEnum):
            """A field filter operator.

            Attributes:
                OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be
                    used.
                LESS_THAN (int): Less than. Requires that the field come first
                    in ``order_by``.
                LESS_THAN_OR_EQUAL (int): Less than or equal. Requires that the
                    field come first in ``order_by``.
                GREATER_THAN (int): Greater than. Requires that the field come
                    first in ``order_by``.
                GREATER_THAN_OR_EQUAL (int): Greater than or equal. Requires
                    that the field come first in ``order_by``.
                EQUAL (int): Equal.
                ARRAY_CONTAINS (int): Contains. Requires that the field is an
                    array.
                IN (int): In. Requires that ``value`` is a non-empty ArrayValue
                    with at most 10 values.
                ARRAY_CONTAINS_ANY (int): Contains any. Requires that the field
                    is an array and ``value`` is a non-empty ArrayValue with at
                    most 10 values.
            """

            OPERATOR_UNSPECIFIED = 0
            LESS_THAN = 1
            LESS_THAN_OR_EQUAL = 2
            GREATER_THAN = 3
            GREATER_THAN_OR_EQUAL = 4
            EQUAL = 5
            # NOTE: value 6 is absent in this wrapper.
            ARRAY_CONTAINS = 7
            IN = 8
            ARRAY_CONTAINS_ANY = 9

    class UnaryFilter(object):
        """Namespace holding the unary-filter operator enum."""

        class Operator(enum.IntEnum):
            """A unary operator.

            Attributes:
                OPERATOR_UNSPECIFIED (int): Unspecified. This value must not be
                    used.
                IS_NAN (int): Test if a field is equal to NaN.
                IS_NULL (int): Test if an expression evaluates to Null.
            """

            OPERATOR_UNSPECIFIED = 0
            # NOTE: value 1 is absent in this wrapper.
            IS_NAN = 2
            IS_NULL = 3
class TargetChange(object):
    """Namespace holding the target-change type enum."""

    class TargetChangeType(enum.IntEnum):
        """The type of change.

        Attributes:
            NO_CHANGE (int): No change has occurred. Used only to send an
                updated ``resume_token``.
            ADD (int): The targets have been added.
            REMOVE (int): The targets have been removed.
            CURRENT (int): The targets reflect all changes committed before the
                targets were added to the stream.  This will be sent after or
                with a ``read_time`` that is greater than or equal to the time
                at which the targets were added.  Listeners can wait for this
                change if read-after-write semantics are desired.
            RESET (int): The targets have been reset, and a new initial state
                for the targets will be returned in subsequent changes.  After
                the initial state is complete, ``CURRENT`` will be returned
                even if the target was previously indicated to be ``CURRENT``.
        """

        NO_CHANGE = 0
        ADD = 1
        REMOVE = 2
        CURRENT = 3
        RESET = 4

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,97 @@
# Client-side retry and timeout configuration for the Firestore v1beta1
# GAPIC client.


def _method_config(retry_codes_name, retry_params_name):
    # Every RPC shares the same 60-second timeout; entries differ only in
    # which retry-code set and retry-parameter profile they reference.
    return {
        "timeout_millis": 60000,
        "retry_codes_name": retry_codes_name,
        "retry_params_name": retry_params_name,
    }


config = {
    "interfaces": {
        "google.firestore.v1beta1.Firestore": {
            "retry_codes": {
                "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
                "non_idempotent": [],
            },
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 20000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 20000,
                    "total_timeout_millis": 600000,
                },
                # Streaming RPCs get a longer per-RPC timeout (300s vs 20s).
                "streaming": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 300000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 300000,
                    "total_timeout_millis": 600000,
                },
            },
            "methods": {
                "DeleteDocument": _method_config("idempotent", "default"),
                "BatchGetDocuments": _method_config("idempotent", "streaming"),
                "BeginTransaction": _method_config("idempotent", "default"),
                "Rollback": _method_config("idempotent", "default"),
                "RunQuery": _method_config("idempotent", "streaming"),
                "Write": _method_config("non_idempotent", "streaming"),
                "Listen": _method_config("idempotent", "streaming"),
                "ListCollectionIds": _method_config("idempotent", "default"),
                "GetDocument": _method_config("idempotent", "default"),
                "ListDocuments": _method_config("idempotent", "default"),
                "CreateDocument": _method_config("non_idempotent", "default"),
                "UpdateDocument": _method_config("non_idempotent", "default"),
                "Commit": _method_config("non_idempotent", "default"),
            },
        }
    }
}

View file

@ -0,0 +1,283 @@
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.firestore_v1beta1.proto import firestore_pb2_grpc
class FirestoreGrpcTransport(object):
    """gRPC transport class providing stubs for
    google.firestore.v1beta1 Firestore API.

    The transport provides access to the raw gRPC stubs,
    which can be used to take advantage of advanced
    features of gRPC.
    """

    # The scopes needed to make gRPC calls to all of the methods defined
    # in this service.
    _OAUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/datastore",
    )

    def __init__(
        self, channel=None, credentials=None, address="firestore.googleapis.com:443"
    ):
        """Instantiate the transport class.

        Args:
            channel (grpc.Channel): A ``Channel`` instance through
                which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            address (str): The address where the service is hosted.

        Raises:
            ValueError: If both ``channel`` and ``credentials`` are given.
        """
        # If both `channel` and `credentials` are specified, raise an
        # exception (channels come with credentials baked in already).
        if channel is not None and credentials is not None:
            raise ValueError(
                "The `channel` and `credentials` arguments are mutually " "exclusive.",
            )
        # Create the channel.
        if channel is None:
            channel = self.create_channel(
                address=address,
                credentials=credentials,
                # -1 lifts gRPC's default message-size caps; ``.items()``
                # yields the (key, value) option tuples grpc expects.
                options={
                    "grpc.max_send_message_length": -1,
                    "grpc.max_receive_message_length": -1,
                }.items(),
            )
        self._channel = channel
        # gRPC uses objects called "stubs" that are bound to the
        # channel and provide a basic method for each RPC.
        self._stubs = {
            "firestore_stub": firestore_pb2_grpc.FirestoreStub(channel),
        }

    @classmethod
    def create_channel(
        cls, address="firestore.googleapis.com:443", credentials=None, **kwargs
    ):
        """Create and return a gRPC channel object.

        Args:
            address (str): The host for the channel to use.
            credentials (~.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            kwargs (dict): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        # Delegate to api_core so auth plumbing and scopes are handled
        # consistently with other GAPIC clients.
        return google.api_core.grpc_helpers.create_channel(
            address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
        )

    @property
    def channel(self):
        """The gRPC channel used by the transport.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return self._channel

    @property
    def delete_document(self):
        """Return the gRPC stub for :meth:`FirestoreClient.delete_document`.

        Deletes a document.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].DeleteDocument

    @property
    def batch_get_documents(self):
        """Return the gRPC stub for :meth:`FirestoreClient.batch_get_documents`.

        Gets multiple documents.

        Documents returned by this method are not guaranteed to be returned in the
        same order that they were requested.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].BatchGetDocuments

    @property
    def begin_transaction(self):
        """Return the gRPC stub for :meth:`FirestoreClient.begin_transaction`.

        Starts a new transaction.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].BeginTransaction

    @property
    def rollback(self):
        """Return the gRPC stub for :meth:`FirestoreClient.rollback`.

        Rolls back a transaction.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].Rollback

    @property
    def run_query(self):
        """Return the gRPC stub for :meth:`FirestoreClient.run_query`.

        Runs a query.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].RunQuery

    @property
    def write(self):
        """Return the gRPC stub for :meth:`FirestoreClient.write`.

        Streams batches of document updates and deletes, in order.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].Write

    @property
    def listen(self):
        """Return the gRPC stub for :meth:`FirestoreClient.listen`.

        Listens to changes.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].Listen

    @property
    def list_collection_ids(self):
        """Return the gRPC stub for :meth:`FirestoreClient.list_collection_ids`.

        Lists all the collection IDs underneath a document.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].ListCollectionIds

    @property
    def get_document(self):
        """Return the gRPC stub for :meth:`FirestoreClient.get_document`.

        Gets a single document.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].GetDocument

    @property
    def list_documents(self):
        """Return the gRPC stub for :meth:`FirestoreClient.list_documents`.

        Lists documents.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].ListDocuments

    @property
    def create_document(self):
        """Return the gRPC stub for :meth:`FirestoreClient.create_document`.

        Creates a new document.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].CreateDocument

    @property
    def update_document(self):
        """Return the gRPC stub for :meth:`FirestoreClient.update_document`.

        Updates or inserts a document.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].UpdateDocument

    @property
    def commit(self):
        """Return the gRPC stub for :meth:`FirestoreClient.commit`.

        Commits a transaction, while optionally updating documents.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["firestore_stub"].Commit

View file

@ -0,0 +1,207 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from google.cloud.firestore_v1beta1._helpers import decode_value
import math
class TypeOrder(Enum):
    """Sort-order buckets for Firestore value types.

    NOTE: This order is defined by the backend and cannot be changed.
    Integer and double values share the NUMBER bucket so they compare
    against each other by numeric value.
    """

    NULL = 0
    BOOLEAN = 1
    NUMBER = 2
    TIMESTAMP = 3
    STRING = 4
    BLOB = 5
    REF = 6
    GEO_POINT = 7
    ARRAY = 8
    OBJECT = 9

    @staticmethod
    def from_value(value):
        """Return the :class:`TypeOrder` bucket for a protobuf ``Value``.

        Args:
            value: A ``Value`` message; its set oneof field determines
                the bucket.

        Returns:
            TypeOrder: The bucket used when comparing across types.

        Raises:
            ValueError: If the set field is not a recognized value type.
        """
        kind = value.WhichOneof("value_type")
        buckets = {
            "null_value": TypeOrder.NULL,
            "boolean_value": TypeOrder.BOOLEAN,
            "integer_value": TypeOrder.NUMBER,
            "double_value": TypeOrder.NUMBER,
            "timestamp_value": TypeOrder.TIMESTAMP,
            "string_value": TypeOrder.STRING,
            "bytes_value": TypeOrder.BLOB,
            "reference_value": TypeOrder.REF,
            "geo_point_value": TypeOrder.GEO_POINT,
            "array_value": TypeOrder.ARRAY,
            "map_value": TypeOrder.OBJECT,
        }
        if kind not in buckets:
            raise ValueError("Could not detect value type for " + kind)
        return buckets[kind]
class Order(object):
    """
    Order implements the ordering semantics of the backend.
    """

    @classmethod
    def compare(cls, left, right):
        """
        Main comparison function for all Firestore types.
        @return -1 is left < right, 0 if left == right, otherwise 1
        """
        # First compare the types.
        leftType = TypeOrder.from_value(left).value
        rightType = TypeOrder.from_value(right).value
        if leftType != rightType:
            if leftType < rightType:
                return -1
            return 1
        # Same type bucket on both sides: dispatch on left's concrete
        # field.  NOTE: integer and double share the NUMBER bucket, so
        # ``right`` may hold a double while ``left`` holds an integer;
        # compare_numbers decodes both before comparing.
        value_type = left.WhichOneof("value_type")
        if value_type == "null_value":
            return 0  # nulls are all equal
        elif value_type == "boolean_value":
            return cls._compare_to(left.boolean_value, right.boolean_value)
        elif value_type == "integer_value":
            return cls.compare_numbers(left, right)
        elif value_type == "double_value":
            return cls.compare_numbers(left, right)
        elif value_type == "timestamp_value":
            return cls.compare_timestamps(left, right)
        elif value_type == "string_value":
            return cls._compare_to(left.string_value, right.string_value)
        elif value_type == "bytes_value":
            return cls.compare_blobs(left, right)
        elif value_type == "reference_value":
            return cls.compare_resource_paths(left, right)
        elif value_type == "geo_point_value":
            return cls.compare_geo_points(left, right)
        elif value_type == "array_value":
            return cls.compare_arrays(left, right)
        elif value_type == "map_value":
            return cls.compare_objects(left, right)
        else:
            raise ValueError("Unknown ``value_type``", str(value_type))

    @staticmethod
    def compare_blobs(left, right):
        """Compare two ``bytes_value`` fields using Python's native
        lexicographic ``bytes`` ordering."""
        left_bytes = left.bytes_value
        right_bytes = right.bytes_value
        return Order._compare_to(left_bytes, right_bytes)

    @staticmethod
    def compare_timestamps(left, right):
        """Compare ``timestamp_value`` fields by seconds, then nanos.

        Unset fields compare as 0 (the proto default).
        """
        left = left.timestamp_value
        right = right.timestamp_value
        seconds = Order._compare_to(left.seconds or 0, right.seconds or 0)
        if seconds != 0:
            return seconds
        return Order._compare_to(left.nanos or 0, right.nanos or 0)

    @staticmethod
    def compare_geo_points(left, right):
        """Compare ``geo_point_value`` fields by latitude, then longitude."""
        # NOTE(review): ``None`` stands in for the client argument of
        # decode_value — presumably unused for geo points; confirm in
        # ``_helpers.decode_value``.
        left_value = decode_value(left, None)
        right_value = decode_value(right, None)
        cmp = (left_value.latitude > right_value.latitude) - (
            left_value.latitude < right_value.latitude
        )
        if cmp != 0:
            return cmp
        return (left_value.longitude > right_value.longitude) - (
            left_value.longitude < right_value.longitude
        )

    @staticmethod
    def compare_resource_paths(left, right):
        """Compare ``reference_value`` paths segment-by-segment.

        Ties over the common segments are broken by total string length
        (not by segment count).
        """
        left = left.reference_value
        right = right.reference_value
        left_segments = left.split("/")
        right_segments = right.split("/")
        shorter = min(len(left_segments), len(right_segments))
        # compare segments
        for i in range(shorter):
            if left_segments[i] < right_segments[i]:
                return -1
            if left_segments[i] > right_segments[i]:
                return 1
        left_length = len(left)
        right_length = len(right)
        return (left_length > right_length) - (left_length < right_length)

    @staticmethod
    def compare_arrays(left, right):
        """Compare ``array_value`` fields element-wise, then by length."""
        l_values = left.array_value.values
        r_values = right.array_value.values
        length = min(len(l_values), len(r_values))
        for i in range(length):
            cmp = Order.compare(l_values[i], r_values[i])
            if cmp != 0:
                return cmp
        return Order._compare_to(len(l_values), len(r_values))

    @staticmethod
    def compare_objects(left, right):
        """Compare ``map_value`` fields by sorted key, then value.

        Walks the two sorted key sequences in lockstep (zip truncates to
        the shorter map); if all compared pairs tie, the smaller map
        sorts first.
        """
        left_fields = left.map_value.fields
        right_fields = right.map_value.fields
        for left_key, right_key in zip(sorted(left_fields), sorted(right_fields)):
            keyCompare = Order._compare_to(left_key, right_key)
            if keyCompare != 0:
                return keyCompare
            value_compare = Order.compare(
                left_fields[left_key], right_fields[right_key]
            )
            if value_compare != 0:
                return value_compare
        return Order._compare_to(len(left_fields), len(right_fields))

    @staticmethod
    def compare_numbers(left, right):
        """Compare NUMBER-bucket values (integer or double) numerically."""
        left_value = decode_value(left, None)
        right_value = decode_value(right, None)
        return Order.compare_doubles(left_value, right_value)

    @staticmethod
    def compare_doubles(left, right):
        """Compare two numbers; NaN sorts before every non-NaN value."""
        if math.isnan(left):
            if math.isnan(right):
                return 0
            return -1
        if math.isnan(right):
            return 1
        return Order._compare_to(left, right)

    @staticmethod
    def _compare_to(left, right):
        # We can't just use cmp(left, right) because cmp doesn't exist
        # in Python 3, so this is an equivalent suggested by
        # https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
        return (left > right) - (left < right)

View file

@ -0,0 +1,203 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.firestore_v1beta1.proto.admin import (
firestore_admin_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2,
)
from google.cloud.firestore_v1beta1.proto.admin import (
index_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class FirestoreAdminStub(object):
    """The Cloud Firestore Admin API.

    This API provides several administrative services for Cloud Firestore.

    # Concepts

    Project, Database, Namespace, Collection, and Document are used as defined in
    the Google Cloud Firestore API.

    Operation: An Operation represents work being performed in the background.

    # Services

    ## Index

    The index service manages Cloud Firestore indexes.

    Index creation is performed asynchronously.
    An Operation resource is created for each such asynchronous operation.
    The state of the operation (including any errors encountered)
    may be queried via the Operation resource.

    ## Metadata

    Provides metadata and statistical information about data in Cloud Firestore.
    The data provided as part of this API may be stale.

    ## Operation

    The Operations collection provides a record of actions performed for the
    specified Project (including any Operations in progress). Operations are not
    created directly but through calls on other collections or resources.

    An Operation that is not yet done may be cancelled. The request to cancel is
    asynchronous and the Operation may continue to run for some time after the
    request to cancel is made.

    An Operation that is done may be deleted so that it is no longer listed as
    part of the Operation collection.

    Operations are created by service `FirestoreAdmin`, but are accessed via
    service `google.longrunning.Operations`.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Generated code: each attribute is a unary-unary callable bound
        # to ``channel``, pairing the request serializer with the
        # response deserializer for that RPC.
        self.CreateIndex = channel.unary_unary(
            "/google.firestore.admin.v1beta1.FirestoreAdmin/CreateIndex",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.CreateIndexRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.ListIndexes = channel.unary_unary(
            "/google.firestore.admin.v1beta1.FirestoreAdmin/ListIndexes",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesResponse.FromString,
        )
        self.GetIndex = channel.unary_unary(
            "/google.firestore.admin.v1beta1.FirestoreAdmin/GetIndex",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.GetIndexRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.FromString,
        )
        self.DeleteIndex = channel.unary_unary(
            "/google.firestore.admin.v1beta1.FirestoreAdmin/DeleteIndex",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.DeleteIndexRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class FirestoreAdminServicer(object):
    """The Cloud Firestore Admin API.

    This API provides several administrative services for Cloud Firestore.

    # Concepts

    Project, Database, Namespace, Collection, and Document are used as defined in
    the Google Cloud Firestore API.

    Operation: An Operation represents work being performed in the background.

    # Services

    ## Index

    The index service manages Cloud Firestore indexes.

    Index creation is performed asynchronously.
    An Operation resource is created for each such asynchronous operation.
    The state of the operation (including any errors encountered)
    may be queried via the Operation resource.

    ## Metadata

    Provides metadata and statistical information about data in Cloud Firestore.
    The data provided as part of this API may be stale.

    ## Operation

    The Operations collection provides a record of actions performed for the
    specified Project (including any Operations in progress). Operations are not
    created directly but through calls on other collections or resources.

    An Operation that is not yet done may be cancelled. The request to cancel is
    asynchronous and the Operation may continue to run for some time after the
    request to cancel is made.

    An Operation that is done may be deleted so that it is no longer listed as
    part of the Operation collection.

    Operations are created by service `FirestoreAdmin`, but are accessed via
    service `google.longrunning.Operations`.
    """

    # Generated base class: every handler below reports UNIMPLEMENTED on
    # the RPC context and raises; subclasses override with real logic.

    def CreateIndex(self, request, context):
        """Creates the specified index.

        A newly created index's initial state is `CREATING`. On completion of the
        returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`.
        If the index already exists, the call will return an `ALREADY_EXISTS`
        status.

        During creation, the process could result in an error, in which case the
        index will move to the `ERROR` state. The process can be recovered by
        fixing the data that caused the error, removing the index with
        [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with
        [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex].

        Indexes with a single field cannot be created.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListIndexes(self, request, context):
        """Lists the indexes that match the specified filters.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetIndex(self, request, context):
        """Gets an index.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteIndex(self, request, context):
        """Deletes an index.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_FirestoreAdminServicer_to_server(servicer, server):
    # Generated registration helper: wires each of ``servicer``'s RPC
    # methods, together with its request deserializer and response
    # serializer, into ``server`` under the full gRPC service name.
    rpc_method_handlers = {
        "CreateIndex": grpc.unary_unary_rpc_method_handler(
            servicer.CreateIndex,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.CreateIndexRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "ListIndexes": grpc.unary_unary_rpc_method_handler(
            servicer.ListIndexes,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.ListIndexesResponse.SerializeToString,
        ),
        "GetIndex": grpc.unary_unary_rpc_method_handler(
            servicer.GetIndex,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.GetIndexRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_index__pb2.Index.SerializeToString,
        ),
        "DeleteIndex": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteIndex,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_admin_dot_firestore__admin__pb2.DeleteIndexRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.firestore.admin.v1beta1.FirestoreAdmin", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))

View file

@ -0,0 +1,300 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_v1beta1/proto/admin/index.proto
import sys

# Py2/Py3 shim used for the serialized descriptor below: on Python 2 the
# literal is already a byte string; on Python 3 it is encoded as latin-1
# so arbitrary byte values round-trip unchanged.
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2

# @@protoc_insertion_point(imports)

# Default symbol database; generated descriptors are registered here.
_sym_db = _symbol_database.Default()

from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/firestore_v1beta1/proto/admin/index.proto",
package="google.firestore.admin.v1beta1",
syntax="proto3",
serialized_pb=_b(
'\n6google/cloud/firestore_v1beta1/proto/admin/index.proto\x12\x1egoogle.firestore.admin.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x9c\x01\n\nIndexField\x12\x12\n\nfield_path\x18\x01 \x01(\t\x12=\n\x04mode\x18\x02 \x01(\x0e\x32/.google.firestore.admin.v1beta1.IndexField.Mode";\n\x04Mode\x12\x14\n\x10MODE_UNSPECIFIED\x10\x00\x12\r\n\tASCENDING\x10\x02\x12\x0e\n\nDESCENDING\x10\x03"\xe8\x01\n\x05Index\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rcollection_id\x18\x02 \x01(\t\x12:\n\x06\x66ields\x18\x03 \x03(\x0b\x32*.google.firestore.admin.v1beta1.IndexField\x12:\n\x05state\x18\x06 \x01(\x0e\x32+.google.firestore.admin.v1beta1.Index.State"B\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x03\x12\t\n\x05READY\x10\x02\x12\t\n\x05\x45RROR\x10\x05\x42\xa5\x01\n"com.google.firestore.admin.v1beta1B\nIndexProtoP\x01ZCgoogle.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin\xa2\x02\x04GCFS\xaa\x02$Google.Cloud.Firestore.Admin.V1Beta1b\x06proto3'
),
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR],
)
# Enum descriptor for IndexField.Mode (index sort direction).
# NOTE: number values 2/3 (not 0/1) come straight from index.proto.
_INDEXFIELD_MODE = _descriptor.EnumDescriptor(
    name="Mode",
    full_name="google.firestore.admin.v1beta1.IndexField.Mode",
    filename=None,
    file=DESCRIPTOR,
    values=[
        _descriptor.EnumValueDescriptor(
            name="MODE_UNSPECIFIED", index=0, number=0, options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="ASCENDING", index=1, number=2, options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="DESCENDING", index=2, number=3, options=None, type=None
        ),
    ],
    containing_type=None,
    options=None,
    # Byte offsets of this enum inside DESCRIPTOR's serialized_pb.
    serialized_start=218,
    serialized_end=277,
)
_sym_db.RegisterEnumDescriptor(_INDEXFIELD_MODE)

# Enum descriptor for Index.State (index build lifecycle).
_INDEX_STATE = _descriptor.EnumDescriptor(
    name="State",
    full_name="google.firestore.admin.v1beta1.Index.State",
    filename=None,
    file=DESCRIPTOR,
    values=[
        _descriptor.EnumValueDescriptor(
            name="STATE_UNSPECIFIED", index=0, number=0, options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="CREATING", index=1, number=3, options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="READY", index=2, number=2, options=None, type=None
        ),
        _descriptor.EnumValueDescriptor(
            name="ERROR", index=3, number=5, options=None, type=None
        ),
    ],
    containing_type=None,
    options=None,
    serialized_start=446,
    serialized_end=512,
)
_sym_db.RegisterEnumDescriptor(_INDEX_STATE)
# Message descriptor for IndexField: one indexed field, identified by
# field_path (proto type 9 = string) plus its sort mode (type 14 = enum;
# the enum_type link is patched in after _INDEX is defined below).
_INDEXFIELD = _descriptor.Descriptor(
    name="IndexField",
    full_name="google.firestore.admin.v1beta1.IndexField",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="field_path",
            full_name="google.firestore.admin.v1beta1.IndexField.field_path",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
        ),
        _descriptor.FieldDescriptor(
            name="mode",
            full_name="google.firestore.admin.v1beta1.IndexField.mode",
            index=1,
            number=2,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[_INDEXFIELD_MODE],
    options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=121,
    serialized_end=277,
)
# Message descriptor for Index: resource name, collection_id, the repeated
# (label=3) IndexField list, and the read-only build state enum. message/enum
# field types are patched in after this definition.
_INDEX = _descriptor.Descriptor(
    name="Index",
    full_name="google.firestore.admin.v1beta1.Index",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.firestore.admin.v1beta1.Index.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
        ),
        _descriptor.FieldDescriptor(
            name="collection_id",
            full_name="google.firestore.admin.v1beta1.Index.collection_id",
            index=1,
            number=2,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=_b("").decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
        ),
        _descriptor.FieldDescriptor(
            name="fields",
            full_name="google.firestore.admin.v1beta1.Index.fields",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
        ),
        _descriptor.FieldDescriptor(
            name="state",
            full_name="google.firestore.admin.v1beta1.Index.state",
            index=3,
            number=6,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            options=None,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[_INDEX_STATE],
    options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=280,
    serialized_end=512,
)
# Patch in the cross-references that could not be set while the descriptors
# were being constructed (field -> enum/message types, enum -> container),
# then register both message types on the file descriptor.
_INDEXFIELD.fields_by_name["mode"].enum_type = _INDEXFIELD_MODE
_INDEXFIELD_MODE.containing_type = _INDEXFIELD
_INDEX.fields_by_name["fields"].message_type = _INDEXFIELD
_INDEX.fields_by_name["state"].enum_type = _INDEX_STATE
_INDEX_STATE.containing_type = _INDEX
DESCRIPTOR.message_types_by_name["IndexField"] = _INDEXFIELD
DESCRIPTOR.message_types_by_name["Index"] = _INDEX
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class for IndexField, synthesized from its descriptor via
# the protobuf reflection metaclass.
IndexField = _reflection.GeneratedProtocolMessageType(
    "IndexField",
    (_message.Message,),
    dict(
        DESCRIPTOR=_INDEXFIELD,
        __module__="google.cloud.firestore_v1beta1.proto.admin.index_pb2",
        __doc__="""A field of an index.
Attributes:
field_path:
The path of the field. Must match the field path specification
described by
[google.firestore.v1beta1.Document.fields][fields]. Special
field path ``__name__`` may be used by itself or at the end of
a path. ``__type__`` may be used only at the end of path.
mode:
The field's mode.
""",
        # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.IndexField)
    ),
)
_sym_db.RegisterMessage(IndexField)

# Concrete message class for Index.
Index = _reflection.GeneratedProtocolMessageType(
    "Index",
    (_message.Message,),
    dict(
        DESCRIPTOR=_INDEX,
        __module__="google.cloud.firestore_v1beta1.proto.admin.index_pb2",
        __doc__="""An index definition.
Attributes:
name:
The resource name of the index.
collection_id:
The collection ID to which this index applies. Required.
fields:
The fields to index.
state:
The state of the index. The state is read-only. @OutputOnly
""",
        # @@protoc_insertion_point(class_scope:google.firestore.admin.v1beta1.Index)
    ),
)
_sym_db.RegisterMessage(Index)
# Attach the file-level options (Java/Go/C# package metadata) by parsing the
# serialized FileOptions blob — legacy generated-code style.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
    descriptor_pb2.FileOptions(),
    _b(
        '\n"com.google.firestore.admin.v1beta1B\nIndexProtoP\001ZCgoogle.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin\242\002\004GCFS\252\002$Google.Cloud.Firestore.Admin.V1Beta1'
    ),
)
try:
    # THESE ELEMENTS WILL BE DEPRECATED.
    # Please use the generated *_pb2_grpc.py files instead.
    # Kept best-effort: older grpc installs may lack the beta API, in which
    # case the ImportError is deliberately swallowed.
    import grpc
    from grpc.beta import implementations as beta_implementations
    from grpc.beta import interfaces as beta_interfaces
    from grpc.framework.common import cardinality
    from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
    pass
# @@protoc_insertion_point(module_scope)

View file

@ -0,0 +1,2 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc

View file

@ -0,0 +1,84 @@
// Copyright 2019 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.v1beta1;
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore";
option java_multiple_files = true;
option java_outer_classname = "CommonProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";
// A set of field paths on a document.
// Used to restrict a get or update operation on a document to a subset of its
// fields.
// This is different from standard field masks, as this is always scoped to a
// [Document][google.firestore.v1beta1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1beta1.Value].
message DocumentMask {
  // The list of field paths in the mask. See [Document.fields][google.firestore.v1beta1.Document.fields] for a field
  // path syntax reference.
  repeated string field_paths = 1;
}
// A precondition on a document, used for conditional operations.
message Precondition {
  // The type of precondition. At most one may be set; leaving the oneof
  // unset means the operation is unconditional.
  oneof condition_type {
    // When set to `true`, the target document must exist.
    // When set to `false`, the target document must not exist.
    bool exists = 1;
    // When set, the target document must exist and have been last updated at
    // that time.
    google.protobuf.Timestamp update_time = 2;
  }
}
// Options for creating a new transaction.
message TransactionOptions {
  // Options for a transaction that can be used to read and write documents.
  message ReadWrite {
    // An optional transaction to retry.
    bytes retry_transaction = 1;
  }

  // Options for a transaction that can only be used to read documents.
  message ReadOnly {
    // The consistency mode for this transaction. If not set, defaults to strong
    // consistency.
    oneof consistency_selector {
      // Reads documents at the given time.
      // This may not be older than 60 seconds.
      google.protobuf.Timestamp read_time = 2;
    }
  }

  // The mode of the transaction.
  oneof mode {
    // The transaction can only be used for read operations.
    ReadOnly read_only = 2;
    // The transaction can be used for both read and write operations.
    ReadWrite read_write = 3;
  }
}

View file

@ -0,0 +1,456 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_v1beta1/proto/common.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database

# @@protoc_insertion_point(imports)

# Default symbol database; all descriptors and message classes in this
# module are registered against it.
_sym_db = _symbol_database.Default()

from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2

# File-level descriptor for common.proto; serialized_pb is the wire-format
# FileDescriptorProto produced by protoc (newer generated-code style: native
# bytes literal and _internal_create_key instead of the Py2 _b() shim).
DESCRIPTOR = _descriptor.FileDescriptor(
    name="google/cloud/firestore_v1beta1/proto/common.proto",
    package="google.firestore.v1beta1",
    syntax="proto3",
    serialized_options=b"\n\034com.google.firestore.v1beta1B\013CommonProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1\352\002!Google::Cloud::Firestore::V1beta1",
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n1google/cloud/firestore_v1beta1/proto/common.proto\x12\x18google.firestore.v1beta1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"#\n\x0c\x44ocumentMask\x12\x13\n\x0b\x66ield_paths\x18\x01 \x03(\t"e\n\x0cPrecondition\x12\x10\n\x06\x65xists\x18\x01 \x01(\x08H\x00\x12\x31\n\x0bupdate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x10\n\x0e\x63ondition_type"\xb3\x02\n\x12TransactionOptions\x12J\n\tread_only\x18\x02 \x01(\x0b\x32\x35.google.firestore.v1beta1.TransactionOptions.ReadOnlyH\x00\x12L\n\nread_write\x18\x03 \x01(\x0b\x32\x36.google.firestore.v1beta1.TransactionOptions.ReadWriteH\x00\x1a&\n\tReadWrite\x12\x19\n\x11retry_transaction\x18\x01 \x01(\x0c\x1aS\n\x08ReadOnly\x12/\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x16\n\x14\x63onsistency_selectorB\x06\n\x04modeB\xdd\x01\n\x1c\x63om.google.firestore.v1beta1B\x0b\x43ommonProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1\xea\x02!Google::Cloud::Firestore::V1beta1b\x06proto3',
    dependencies=[
        google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
        google_dot_api_dot_annotations__pb2.DESCRIPTOR,
    ],
)
# Message descriptor for DocumentMask: a single repeated (label=3) string
# (type 9) field, field_paths.
_DOCUMENTMASK = _descriptor.Descriptor(
    name="DocumentMask",
    full_name="google.firestore.v1beta1.DocumentMask",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="field_paths",
            full_name="google.firestore.v1beta1.DocumentMask.field_paths",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    # Byte offsets of this message inside DESCRIPTOR's serialized_pb.
    serialized_start=142,
    serialized_end=177,
)
# Message descriptor for Precondition: bool `exists` (type 8) and Timestamp
# `update_time` (type 11), mutually exclusive via the condition_type oneof
# (oneof membership is wired up after all descriptors are built).
_PRECONDITION = _descriptor.Descriptor(
    name="Precondition",
    full_name="google.firestore.v1beta1.Precondition",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="exists",
            full_name="google.firestore.v1beta1.Precondition.exists",
            index=0,
            number=1,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="update_time",
            full_name="google.firestore.v1beta1.Precondition.update_time",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="condition_type",
            full_name="google.firestore.v1beta1.Precondition.condition_type",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=179,
    serialized_end=280,
)
# Nested message descriptor TransactionOptions.ReadWrite: a single bytes
# (type 12) field, retry_transaction.
_TRANSACTIONOPTIONS_READWRITE = _descriptor.Descriptor(
    name="ReadWrite",
    full_name="google.firestore.v1beta1.TransactionOptions.ReadWrite",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="retry_transaction",
            full_name="google.firestore.v1beta1.TransactionOptions.ReadWrite.retry_transaction",
            index=0,
            number=1,
            type=12,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"",
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=459,
    serialized_end=497,
)
# Nested message descriptor TransactionOptions.ReadOnly: an optional
# Timestamp read_time inside the consistency_selector oneof.
_TRANSACTIONOPTIONS_READONLY = _descriptor.Descriptor(
    name="ReadOnly",
    full_name="google.firestore.v1beta1.TransactionOptions.ReadOnly",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="read_time",
            full_name="google.firestore.v1beta1.TransactionOptions.ReadOnly.read_time",
            index=0,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="consistency_selector",
            full_name="google.firestore.v1beta1.TransactionOptions.ReadOnly.consistency_selector",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=499,
    serialized_end=582,
)
# Message descriptor for TransactionOptions: the read_only / read_write
# message fields are mutually exclusive via the `mode` oneof; ReadWrite and
# ReadOnly are registered as nested types.
_TRANSACTIONOPTIONS = _descriptor.Descriptor(
    name="TransactionOptions",
    full_name="google.firestore.v1beta1.TransactionOptions",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="read_only",
            full_name="google.firestore.v1beta1.TransactionOptions.read_only",
            index=0,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="read_write",
            full_name="google.firestore.v1beta1.TransactionOptions.read_write",
            index=1,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[_TRANSACTIONOPTIONS_READWRITE, _TRANSACTIONOPTIONS_READONLY,],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        _descriptor.OneofDescriptor(
            name="mode",
            full_name="google.firestore.v1beta1.TransactionOptions.mode",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=283,
    serialized_end=590,
)
# Patch in the cross-references that could not be set at construction time:
# message-typed fields, oneof membership (each oneof field is appended to its
# oneof and given a containing_oneof back-reference), and nested-type
# containment; then register the top-level messages on the file descriptor.
_PRECONDITION.fields_by_name[
    "update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_PRECONDITION.oneofs_by_name["condition_type"].fields.append(
    _PRECONDITION.fields_by_name["exists"]
)
_PRECONDITION.fields_by_name["exists"].containing_oneof = _PRECONDITION.oneofs_by_name[
    "condition_type"
]
_PRECONDITION.oneofs_by_name["condition_type"].fields.append(
    _PRECONDITION.fields_by_name["update_time"]
)
_PRECONDITION.fields_by_name[
    "update_time"
].containing_oneof = _PRECONDITION.oneofs_by_name["condition_type"]
_TRANSACTIONOPTIONS_READWRITE.containing_type = _TRANSACTIONOPTIONS
_TRANSACTIONOPTIONS_READONLY.fields_by_name[
    "read_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_TRANSACTIONOPTIONS_READONLY.containing_type = _TRANSACTIONOPTIONS
_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["consistency_selector"].fields.append(
    _TRANSACTIONOPTIONS_READONLY.fields_by_name["read_time"]
)
_TRANSACTIONOPTIONS_READONLY.fields_by_name[
    "read_time"
].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["consistency_selector"]
_TRANSACTIONOPTIONS.fields_by_name[
    "read_only"
].message_type = _TRANSACTIONOPTIONS_READONLY
_TRANSACTIONOPTIONS.fields_by_name[
    "read_write"
].message_type = _TRANSACTIONOPTIONS_READWRITE
_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append(
    _TRANSACTIONOPTIONS.fields_by_name["read_only"]
)
_TRANSACTIONOPTIONS.fields_by_name[
    "read_only"
].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"]
_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append(
    _TRANSACTIONOPTIONS.fields_by_name["read_write"]
)
_TRANSACTIONOPTIONS.fields_by_name[
    "read_write"
].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"]
DESCRIPTOR.message_types_by_name["DocumentMask"] = _DOCUMENTMASK
DESCRIPTOR.message_types_by_name["Precondition"] = _PRECONDITION
DESCRIPTOR.message_types_by_name["TransactionOptions"] = _TRANSACTIONOPTIONS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes, synthesized from their descriptors via the
# protobuf reflection metaclass; nested ReadWrite/ReadOnly classes are built
# inline inside the TransactionOptions class dict.
DocumentMask = _reflection.GeneratedProtocolMessageType(
    "DocumentMask",
    (_message.Message,),
    {
        "DESCRIPTOR": _DOCUMENTMASK,
        "__module__": "google.cloud.firestore_v1beta1.proto.common_pb2",
        "__doc__": """A set of field paths on a document. Used to restrict a get or update
operation on a document to a subset of its fields. This is different
from standard field masks, as this is always scoped to a
[Document][google.firestore.v1beta1.Document], and takes in account
the dynamic nature of [Value][google.firestore.v1beta1.Value].
Attributes:
field_paths:
The list of field paths in the mask. See
[Document.fields][google.firestore.v1beta1.Document.fields]
for a field path syntax reference.
""",
        # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.DocumentMask)
    },
)
_sym_db.RegisterMessage(DocumentMask)

Precondition = _reflection.GeneratedProtocolMessageType(
    "Precondition",
    (_message.Message,),
    {
        "DESCRIPTOR": _PRECONDITION,
        "__module__": "google.cloud.firestore_v1beta1.proto.common_pb2",
        "__doc__": """A precondition on a document, used for conditional operations.
Attributes:
condition_type:
The type of precondition.
exists:
When set to ``true``, the target document must exist. When set
to ``false``, the target document must not exist.
update_time:
When set, the target document must exist and have been last
updated at that time.
""",
        # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Precondition)
    },
)
_sym_db.RegisterMessage(Precondition)

TransactionOptions = _reflection.GeneratedProtocolMessageType(
    "TransactionOptions",
    (_message.Message,),
    {
        "ReadWrite": _reflection.GeneratedProtocolMessageType(
            "ReadWrite",
            (_message.Message,),
            {
                "DESCRIPTOR": _TRANSACTIONOPTIONS_READWRITE,
                "__module__": "google.cloud.firestore_v1beta1.proto.common_pb2",
                "__doc__": """Options for a transaction that can be used to read and write
documents.
Attributes:
retry_transaction:
An optional transaction to retry.
""",
                # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.TransactionOptions.ReadWrite)
            },
        ),
        "ReadOnly": _reflection.GeneratedProtocolMessageType(
            "ReadOnly",
            (_message.Message,),
            {
                "DESCRIPTOR": _TRANSACTIONOPTIONS_READONLY,
                "__module__": "google.cloud.firestore_v1beta1.proto.common_pb2",
                "__doc__": """Options for a transaction that can only be used to read documents.
Attributes:
consistency_selector:
The consistency mode for this transaction. If not set,
defaults to strong consistency.
read_time:
Reads documents at the given time. This may not be older than
60 seconds.
""",
                # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.TransactionOptions.ReadOnly)
            },
        ),
        "DESCRIPTOR": _TRANSACTIONOPTIONS,
        "__module__": "google.cloud.firestore_v1beta1.proto.common_pb2",
        "__doc__": """Options for creating a new transaction.
Attributes:
mode:
The mode of the transaction.
read_only:
The transaction can only be used for read operations.
read_write:
The transaction can be used for both read and write
operations.
""",
        # @@protoc_insertion_point(class_scope:google.firestore.v1beta1.TransactionOptions)
    },
)
_sym_db.RegisterMessage(TransactionOptions)
_sym_db.RegisterMessage(TransactionOptions.ReadWrite)
_sym_db.RegisterMessage(TransactionOptions.ReadOnly)

# File options already live in DESCRIPTOR.serialized_options above.
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)

View file

@ -0,0 +1,3 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

View file

@ -0,0 +1,151 @@
// Copyright 2019 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.v1beta1;
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/type/latlng.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore";
option java_multiple_files = true;
option java_outer_classname = "DocumentProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";
// A Firestore document.
//
// Must not exceed 1 MiB - 4 bytes.
message Document {
  // The resource name of the document, for example
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  string name = 1;

  // The document's fields.
  //
  // The map keys represent field names.
  //
  // A simple field name contains only characters `a` to `z`, `A` to `Z`,
  // `0` to `9`, or `_`, and must not start with `0` to `9`. For example,
  // `foo_bar_17`.
  //
  // Field names matching the regular expression `__.*__` are reserved. Reserved
  // field names are forbidden except in certain documented contexts. The map
  // keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be
  // empty.
  //
  // Field paths may be used in other contexts to refer to structured fields
  // defined here. For `map_value`, the field path is represented by the simple
  // or quoted field names of the containing fields, delimited by `.`. For
  // example, the structured field
  // `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be
  // represented by the field path `foo.x&y`.
  //
  // Within a field path, a quoted field name starts and ends with `` ` `` and
  // may contain any character. Some characters, including `` ` ``, must be
  // escaped using a `\`. For example, `` `x&y` `` represents `x&y` and
  // `` `bak\`tik` `` represents `` bak`tik ``.
  map<string, Value> fields = 2;

  // Output only. The time at which the document was created.
  //
  // This value increases monotonically when a document is deleted then
  // recreated. It can also be compared to values from other documents and
  // the `read_time` of a query.
  google.protobuf.Timestamp create_time = 3;

  // Output only. The time at which the document was last changed.
  //
  // This value is initially set to the `create_time` then increases
  // monotonically with each change to the document. It can also be
  // compared to values from other documents and the `read_time` of a query.
  google.protobuf.Timestamp update_time = 4;
}
// A message that can hold any of the supported value types.
message Value {
  // Must have a value set.
  // NOTE: field numbers are non-contiguous; they are part of the wire
  // format and must never be renumbered.
  oneof value_type {
    // A null value.
    google.protobuf.NullValue null_value = 11;

    // A boolean value.
    bool boolean_value = 1;

    // An integer value.
    int64 integer_value = 2;

    // A double value.
    double double_value = 3;

    // A timestamp value.
    //
    // Precise only to microseconds. When stored, any additional precision is
    // rounded down.
    google.protobuf.Timestamp timestamp_value = 10;

    // A string value.
    //
    // The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes.
    // Only the first 1,500 bytes of the UTF-8 representation are considered by
    // queries.
    string string_value = 17;

    // A bytes value.
    //
    // Must not exceed 1 MiB - 89 bytes.
    // Only the first 1,500 bytes are considered by queries.
    bytes bytes_value = 18;

    // A reference to a document. For example:
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    string reference_value = 5;

    // A geo point value representing a point on the surface of Earth.
    google.type.LatLng geo_point_value = 8;

    // An array value.
    //
    // Cannot directly contain another array value, though can contain an
    // map which contains another array.
    ArrayValue array_value = 9;

    // A map value.
    MapValue map_value = 6;
  }
}
// An array value.
message ArrayValue {
  // Values in the array.
  repeated Value values = 1;
}

// A map value.
message MapValue {
  // The map's fields.
  //
  // The map keys represent field names. Field names matching the regular
  // expression `__.*__` are reserved. Reserved field names are forbidden except
  // in certain documented contexts. The map keys, represented as UTF-8, must
  // not exceed 1,500 bytes and cannot be empty.
  map<string, Value> fields = 1;
}

View file

@ -0,0 +1,815 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_v1beta1/proto/document.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
# File descriptor for document.proto, reconstructed at import time from the
# serialized FileDescriptorProto blob below (emitted by protoc; do not edit).
DESCRIPTOR = _descriptor.FileDescriptor(
    name="google/cloud/firestore_v1beta1/proto/document.proto",
    package="google.firestore.v1beta1",
    syntax="proto3",
    serialized_options=b"\n\034com.google.firestore.v1beta1B\rDocumentProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\242\002\004GCFS\252\002\036Google.Cloud.Firestore.V1Beta1\312\002\036Google\\Cloud\\Firestore\\V1beta1\352\002!Google::Cloud::Firestore::V1beta1",
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n3google/cloud/firestore_v1beta1/proto/document.proto\x12\x18google.firestore.v1beta1\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x18google/type/latlng.proto\x1a\x1cgoogle/api/annotations.proto"\x8a\x02\n\x08\x44ocument\x12\x0c\n\x04name\x18\x01 \x01(\t\x12>\n\x06\x66ields\x18\x02 \x03(\x0b\x32..google.firestore.v1beta1.Document.FieldsEntry\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1aN\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.google.firestore.v1beta1.Value:\x02\x38\x01"\xb8\x03\n\x05Value\x12\x30\n\nnull_value\x18\x0b \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x17\n\rboolean_value\x18\x01 \x01(\x08H\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03 \x01(\x01H\x00\x12\x35\n\x0ftimestamp_value\x18\n \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x16\n\x0cstring_value\x18\x11 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x12 \x01(\x0cH\x00\x12\x19\n\x0freference_value\x18\x05 \x01(\tH\x00\x12.\n\x0fgeo_point_value\x18\x08 \x01(\x0b\x32\x13.google.type.LatLngH\x00\x12;\n\x0b\x61rray_value\x18\t \x01(\x0b\x32$.google.firestore.v1beta1.ArrayValueH\x00\x12\x37\n\tmap_value\x18\x06 \x01(\x0b\x32".google.firestore.v1beta1.MapValueH\x00\x42\x0c\n\nvalue_type"=\n\nArrayValue\x12/\n\x06values\x18\x01 \x03(\x0b\x32\x1f.google.firestore.v1beta1.Value"\x9a\x01\n\x08MapValue\x12>\n\x06\x66ields\x18\x01 \x03(\x0b\x32..google.firestore.v1beta1.MapValue.FieldsEntry\x1aN\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.google.firestore.v1beta1.Value:\x02\x38\x01\x42\xdf\x01\n\x1c\x63om.google.firestore.v1beta1B\rDocumentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xa2\x02\x04GCFS\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1\xca\x02\x1eGoogle\\Cloud\\Firestore\\V1beta1\xea\x02!Google::Cloud::Firestore::V1beta1b\x06proto3',
    dependencies=[
        google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
        google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
        google_dot_type_dot_latlng__pb2.DESCRIPTOR,
        google_dot_api_dot_annotations__pb2.DESCRIPTOR,
    ],
)
# Descriptor for the synthetic map-entry message Document.FieldsEntry,
# which backs Document's `map<string, Value> fields` field.
_DOCUMENT_FIELDSENTRY = _descriptor.Descriptor(
    name="FieldsEntry",
    full_name="google.firestore.v1beta1.Document.FieldsEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        # Map key: the field name (proto type string).
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.firestore.v1beta1.Document.FieldsEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        # Map value: a Value message (message_type linked after construction).
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.firestore.v1beta1.Document.FieldsEntry.value",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    # b"8\001" encodes `option map_entry = true;` for this synthetic message.
    serialized_options=b"8\001",
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=389,
    serialized_end=467,
)
# Descriptor for the Document message: resource name, the fields map, and
# the server-maintained create/update timestamps.
_DOCUMENT = _descriptor.Descriptor(
    name="Document",
    full_name="google.firestore.v1beta1.Document",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="name",
            full_name="google.firestore.v1beta1.Document.name",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        # Repeated FieldsEntry entries (the map<string, Value>).
        _descriptor.FieldDescriptor(
            name="fields",
            full_name="google.firestore.v1beta1.Document.fields",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="create_time",
            full_name="google.firestore.v1beta1.Document.create_time",
            index=2,
            number=3,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="update_time",
            full_name="google.firestore.v1beta1.Document.update_time",
            index=3,
            number=4,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[_DOCUMENT_FIELDSENTRY,],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=201,
    serialized_end=467,
)
# Descriptor for the Value message: one field per member of the `value_type`
# oneof (null, boolean, integer, double, timestamp, string, bytes, reference,
# geo point, array, map). Oneof membership is wired up after construction.
_VALUE = _descriptor.Descriptor(
    name="Value",
    full_name="google.firestore.v1beta1.Value",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="null_value",
            full_name="google.firestore.v1beta1.Value.null_value",
            index=0,
            number=11,
            type=14,
            cpp_type=8,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="boolean_value",
            full_name="google.firestore.v1beta1.Value.boolean_value",
            index=1,
            number=1,
            type=8,
            cpp_type=7,
            label=1,
            has_default_value=False,
            default_value=False,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="integer_value",
            full_name="google.firestore.v1beta1.Value.integer_value",
            index=2,
            number=2,
            type=3,
            cpp_type=2,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="double_value",
            full_name="google.firestore.v1beta1.Value.double_value",
            index=3,
            number=3,
            type=1,
            cpp_type=5,
            label=1,
            has_default_value=False,
            default_value=float(0),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="timestamp_value",
            full_name="google.firestore.v1beta1.Value.timestamp_value",
            index=4,
            number=10,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="string_value",
            full_name="google.firestore.v1beta1.Value.string_value",
            index=5,
            number=17,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="bytes_value",
            full_name="google.firestore.v1beta1.Value.bytes_value",
            index=6,
            number=18,
            type=12,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"",
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="reference_value",
            full_name="google.firestore.v1beta1.Value.reference_value",
            index=7,
            number=5,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="geo_point_value",
            full_name="google.firestore.v1beta1.Value.geo_point_value",
            index=8,
            number=8,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="array_value",
            full_name="google.firestore.v1beta1.Value.array_value",
            index=9,
            number=9,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="map_value",
            full_name="google.firestore.v1beta1.Value.map_value",
            index=10,
            number=6,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[
        # The `value_type` oneof; its member fields are appended below,
        # after all FieldDescriptors exist.
        _descriptor.OneofDescriptor(
            name="value_type",
            full_name="google.firestore.v1beta1.Value.value_type",
            index=0,
            containing_type=None,
            create_key=_descriptor._internal_create_key,
            fields=[],
        ),
    ],
    serialized_start=470,
    serialized_end=910,
)
# Descriptor for ArrayValue: a repeated list of Value messages.
_ARRAYVALUE = _descriptor.Descriptor(
    name="ArrayValue",
    full_name="google.firestore.v1beta1.ArrayValue",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="values",
            full_name="google.firestore.v1beta1.ArrayValue.values",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=912,
    serialized_end=973,
)
# Descriptor for the synthetic map-entry message MapValue.FieldsEntry,
# which backs MapValue's `map<string, Value> fields` field.
_MAPVALUE_FIELDSENTRY = _descriptor.Descriptor(
    name="FieldsEntry",
    full_name="google.firestore.v1beta1.MapValue.FieldsEntry",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        # Map key: the field name (proto type string).
        _descriptor.FieldDescriptor(
            name="key",
            full_name="google.firestore.v1beta1.MapValue.FieldsEntry.key",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        # Map value: a Value message (message_type linked after construction).
        _descriptor.FieldDescriptor(
            name="value",
            full_name="google.firestore.v1beta1.MapValue.FieldsEntry.value",
            index=1,
            number=2,
            type=11,
            cpp_type=10,
            label=1,
            has_default_value=False,
            default_value=None,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    # b"8\001" encodes `option map_entry = true;` for this synthetic message.
    serialized_options=b"8\001",
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    # Fixed: these offsets previously duplicated Document.FieldsEntry's byte
    # range (389-467). In the serialized file descriptor above, MapValue
    # occupies bytes 976-1130 (see _MAPVALUE) and its 78-byte FieldsEntry
    # submessage sits at the tail of that range, i.e. bytes 1052-1130.
    serialized_start=1052,
    serialized_end=1130,
)
# Descriptor for MapValue: a map<string, Value>, represented on the wire as
# repeated entries of the nested FieldsEntry message.
_MAPVALUE = _descriptor.Descriptor(
    name="MapValue",
    full_name="google.firestore.v1beta1.MapValue",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="fields",
            full_name="google.firestore.v1beta1.MapValue.fields",
            index=0,
            number=1,
            type=11,
            cpp_type=10,
            label=3,
            has_default_value=False,
            default_value=[],
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[_MAPVALUE_FIELDSENTRY,],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=976,
    serialized_end=1130,
)
# --- Post-construction wiring -----------------------------------------------
# Resolve message/enum cross-references that could not be set while the
# Descriptor objects above were being built.
_DOCUMENT_FIELDSENTRY.fields_by_name["value"].message_type = _VALUE
_DOCUMENT_FIELDSENTRY.containing_type = _DOCUMENT
_DOCUMENT.fields_by_name["fields"].message_type = _DOCUMENT_FIELDSENTRY
_DOCUMENT.fields_by_name[
    "create_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_DOCUMENT.fields_by_name[
    "update_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VALUE.fields_by_name[
    "null_value"
].enum_type = google_dot_protobuf_dot_struct__pb2._NULLVALUE
_VALUE.fields_by_name[
    "timestamp_value"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_VALUE.fields_by_name[
    "geo_point_value"
].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_VALUE.fields_by_name["array_value"].message_type = _ARRAYVALUE
_VALUE.fields_by_name["map_value"].message_type = _MAPVALUE
# Register every Value field as a member of the `value_type` oneof.
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["null_value"])
_VALUE.fields_by_name["null_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(
    _VALUE.fields_by_name["boolean_value"]
)
_VALUE.fields_by_name["boolean_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(
    _VALUE.fields_by_name["integer_value"]
)
_VALUE.fields_by_name["integer_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["double_value"])
_VALUE.fields_by_name["double_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(
    _VALUE.fields_by_name["timestamp_value"]
)
_VALUE.fields_by_name["timestamp_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["string_value"])
_VALUE.fields_by_name["string_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["bytes_value"])
_VALUE.fields_by_name["bytes_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(
    _VALUE.fields_by_name["reference_value"]
)
_VALUE.fields_by_name["reference_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(
    _VALUE.fields_by_name["geo_point_value"]
)
_VALUE.fields_by_name["geo_point_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["array_value"])
_VALUE.fields_by_name["array_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["map_value"])
_VALUE.fields_by_name["map_value"].containing_oneof = _VALUE.oneofs_by_name[
    "value_type"
]
_ARRAYVALUE.fields_by_name["values"].message_type = _VALUE
_MAPVALUE_FIELDSENTRY.fields_by_name["value"].message_type = _VALUE
_MAPVALUE_FIELDSENTRY.containing_type = _MAPVALUE
_MAPVALUE.fields_by_name["fields"].message_type = _MAPVALUE_FIELDSENTRY
# Expose the top-level messages on the file descriptor and register it with
# the default symbol database.
DESCRIPTOR.message_types_by_name["Document"] = _DOCUMENT
DESCRIPTOR.message_types_by_name["Value"] = _VALUE
DESCRIPTOR.message_types_by_name["ArrayValue"] = _ARRAYVALUE
DESCRIPTOR.message_types_by_name["MapValue"] = _MAPVALUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Document = _reflection.GeneratedProtocolMessageType(
"Document",
(_message.Message,),
{
"FieldsEntry": _reflection.GeneratedProtocolMessageType(
"FieldsEntry",
(_message.Message,),
{
"DESCRIPTOR": _DOCUMENT_FIELDSENTRY,
"__module__": "google.cloud.firestore_v1beta1.proto.document_pb2"
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Document.FieldsEntry)
},
),
"DESCRIPTOR": _DOCUMENT,
"__module__": "google.cloud.firestore_v1beta1.proto.document_pb2",
"__doc__": """A Firestore document. Must not exceed 1 MiB - 4 bytes.
Attributes:
name:
The resource name of the document, for example ``projects/{pro
ject_id}/databases/{database_id}/documents/{document_path}``.
fields:
The documents fields. The map keys represent field names. A
simple field name contains only characters ``a`` to ``z``,
``A`` to ``Z``, ``0`` to ``9``, or ``_``, and must not start
with ``0`` to ``9``. For example, ``foo_bar_17``. Field names
matching the regular expression ``__.*__`` are reserved.
Reserved field names are forbidden except in certain
documented contexts. The map keys, represented as UTF-8, must
not exceed 1,500 bytes and cannot be empty. Field paths may
be used in other contexts to refer to structured fields
defined here. For ``map_value``, the field path is represented
by the simple or quoted field names of the containing fields,
delimited by ``.``. For example, the structured field ``"foo"
: { map_value: { "x&y" : { string_value: "hello" }}}`` would
be represented by the field path ``foo.x&y``. Within a field
path, a quoted field name starts and ends with :literal:`\``
and may contain any character. Some characters, including
:literal:`\``, must be escaped using a ``\``. For example,
:literal:`\`x&y\`` represents ``x&y`` and
:literal:`\`bak\`tik\`` represents :literal:`bak`tik`.
create_time:
Output only. The time at which the document was created. This
value increases monotonically when a document is deleted then
recreated. It can also be compared to values from other
documents and the ``read_time`` of a query.
update_time:
Output only. The time at which the document was last changed.
This value is initially set to the ``create_time`` then
increases monotonically with each change to the document. It
can also be compared to values from other documents and the
``read_time`` of a query.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Document)
},
)
_sym_db.RegisterMessage(Document)
_sym_db.RegisterMessage(Document.FieldsEntry)
Value = _reflection.GeneratedProtocolMessageType(
"Value",
(_message.Message,),
{
"DESCRIPTOR": _VALUE,
"__module__": "google.cloud.firestore_v1beta1.proto.document_pb2",
"__doc__": """A message that can hold any of the supported value types.
Attributes:
value_type:
Must have a value set.
null_value:
A null value.
boolean_value:
A boolean value.
integer_value:
An integer value.
double_value:
A double value.
timestamp_value:
A timestamp value. Precise only to microseconds. When stored,
any additional precision is rounded down.
string_value:
A string value. The string, represented as UTF-8, must not
exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the
UTF-8 representation are considered by queries.
bytes_value:
A bytes value. Must not exceed 1 MiB - 89 bytes. Only the
first 1,500 bytes are considered by queries.
reference_value:
A reference to a document. For example: ``projects/{project_id
}/databases/{database_id}/documents/{document_path}``.
geo_point_value:
A geo point value representing a point on the surface of
Earth.
array_value:
An array value. Cannot directly contain another array value,
though can contain an map which contains another array.
map_value:
A map value.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.Value)
},
)
_sym_db.RegisterMessage(Value)
ArrayValue = _reflection.GeneratedProtocolMessageType(
"ArrayValue",
(_message.Message,),
{
"DESCRIPTOR": _ARRAYVALUE,
"__module__": "google.cloud.firestore_v1beta1.proto.document_pb2",
"__doc__": """An array value.
Attributes:
values:
Values in the array.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.ArrayValue)
},
)
_sym_db.RegisterMessage(ArrayValue)
MapValue = _reflection.GeneratedProtocolMessageType(
"MapValue",
(_message.Message,),
{
"FieldsEntry": _reflection.GeneratedProtocolMessageType(
"FieldsEntry",
(_message.Message,),
{
"DESCRIPTOR": _MAPVALUE_FIELDSENTRY,
"__module__": "google.cloud.firestore_v1beta1.proto.document_pb2"
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.MapValue.FieldsEntry)
},
),
"DESCRIPTOR": _MAPVALUE,
"__module__": "google.cloud.firestore_v1beta1.proto.document_pb2",
"__doc__": """A map value.
Attributes:
fields:
The maps fields. The map keys represent field names. Field
names matching the regular expression ``__.*__`` are reserved.
Reserved field names are forbidden except in certain
documented contexts. The map keys, represented as UTF-8, must
not exceed 1,500 bytes and cannot be empty.
""",
# @@protoc_insertion_point(class_scope:google.firestore.v1beta1.MapValue)
},
)
_sym_db.RegisterMessage(MapValue)
_sym_db.RegisterMessage(MapValue.FieldsEntry)
# protoc-emitted epilogue: drop the eagerly-assigned option objects so the
# runtime serves options from the serialized descriptor data instead.
DESCRIPTOR._options = None
_DOCUMENT_FIELDSENTRY._options = None
_MAPVALUE_FIELDSENTRY._options = None
# @@protoc_insertion_point(module_scope)

View file

@ -0,0 +1,3 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

View file

@ -0,0 +1,62 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto
import sys
# Py2/Py3 bytes shim: on Python 2 (where str is bytes) pass the literal
# through unchanged; on Python 3 encode it as latin-1 so the serialized
# descriptor bytes round-trip exactly.
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.firestore_v1beta1.proto import (
common_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2,
)
from google.cloud.firestore_v1beta1.proto import (
document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2,
)
# File descriptor for event_flow_document_change.proto (a file with no
# messages of its own -- only imports and file-level options).
# NOTE(review): this module uses the older generated-code style (no
# create_key, options patched after construction) -- presumably emitted by an
# older protoc/protobuf toolchain than the sibling document_pb2 module.
DESCRIPTOR = _descriptor.FileDescriptor(
    name="google/cloud/firestore_v1beta1/proto/event_flow_document_change.proto",
    package="google.firestore.v1beta1",
    syntax="proto3",
    serialized_pb=_b(
        "\nEgoogle/cloud/firestore_v1beta1/proto/event_flow_document_change.proto\x12\x18google.firestore.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/firestore_v1beta1/proto/common.proto\x1a\x33google/cloud/firestore_v1beta1/proto/document.protoB\xa2\x01\n\x1c\x63om.google.firestore.v1beta1B\x1c\x45ventFlowDocumentChangeProtoP\x01ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\xaa\x02\x1eGoogle.Cloud.Firestore.V1Beta1b\x06proto3"
    ),
    dependencies=[
        google_dot_api_dot_annotations__pb2.DESCRIPTOR,
        google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_common__pb2.DESCRIPTOR,
        google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.DESCRIPTOR,
    ],
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Legacy options assignment (pre-protobuf-3.x generated style): parse the
# file-level options from their serialized form and attach them directly.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(
    descriptor_pb2.FileOptions(),
    _b(
        "\n\034com.google.firestore.v1beta1B\034EventFlowDocumentChangeProtoP\001ZAgoogle.golang.org/genproto/googleapis/firestore/v1beta1;firestore\252\002\036Google.Cloud.Firestore.V1Beta1"
    ),
)
# Best-effort import of the legacy grpc-beta symbols used by the deprecated
# in-file service stubs; absence of grpc is tolerated.
try:
    # THESE ELEMENTS WILL BE DEPRECATED.
    # Please use the generated *_pb2_grpc.py files instead.
    import grpc
    from grpc.beta import implementations as beta_implementations
    from grpc.beta import interfaces as beta_interfaces
    from grpc.framework.common import cardinality
    from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
    pass
# @@protoc_insertion_point(module_scope)
# @@protoc_insertion_point(module_scope)

View file

@ -0,0 +1,2 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc

View file

@ -0,0 +1,95 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.admin.v1beta2;
import "google/api/annotations.proto";
import "google/firestore/admin/v1beta2/index.proto";
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2";
option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin";
option java_multiple_files = true;
option java_outer_classname = "FieldProto";
option java_package = "com.google.firestore.admin.v1beta2";
option objc_class_prefix = "GCFS";
// Represents a single field in the database.
//
// Fields are grouped by their "Collection Group", which represent all
// collections in the database with the same id.
message Field {
  // The index configuration for this field.
  message IndexConfig {
    // The indexes supported for this field.
    repeated Index indexes = 1;
    // Output only.
    // When true, the `Field`'s index configuration is set from the
    // configuration specified by the `ancestor_field`.
    // When false, the `Field`'s index configuration is defined explicitly.
    bool uses_ancestor_config = 2;
    // Output only.
    // Specifies the resource name of the `Field` from which this field's
    // index configuration is set (when `uses_ancestor_config` is true),
    // or from which it *would* be set if this field had no index configuration
    // (when `uses_ancestor_config` is false).
    string ancestor_field = 3;
    // Output only.
    // When true, the `Field`'s index configuration is in the process of being
    // reverted. Once complete, the index config will transition to the same
    // state as the field specified by `ancestor_field`, at which point
    // `uses_ancestor_config` will be `true` and `reverting` will be `false`.
    bool reverting = 4;
  }
  // A field name of the form
  // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}`
  //
  // A field path may be a simple field name, e.g. `address` or a path to fields
  // within map_value , e.g. `address.city`,
  // or a special field path. The only valid special field is `*`, which
  // represents any field.
  //
  // Field paths may be quoted using ` (backtick). The only character that needs
  // to be escaped within a quoted field path is the backtick character itself,
  // escaped using a backslash. Special characters in field paths that
  // must be quoted include: `*`, `.`,
  // ``` (backtick), `[`, `]`, as well as any ascii symbolic characters.
  //
  // Examples:
  // (Note: Comments here are written in markdown syntax, so there is an
  // additional layer of backticks to represent a code block)
  // `\`address.city\`` represents a field named `address.city`, not the map key
  // `city` in the field `address`.
  // `\`*\`` represents a field named `*`, not any field.
  //
  // A special `Field` contains the default indexing settings for all fields.
  // This field's resource name is:
  // `projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`
  // Indexes defined on this `Field` will be applied to all fields which do not
  // have their own `Field` index configuration.
  string name = 1;
  // The index configuration for this field. If unset, field indexing will
  // revert to the configuration defined by the `ancestor_field`. To
  // explicitly remove all indexes for this field, specify an index config
  // with an empty list of indexes.
  IndexConfig index_config = 2;
}

View file

@ -0,0 +1,766 @@
// Copyright 2019 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.v1beta1;
import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/firestore/v1beta1/common.proto";
import "google/firestore/v1beta1/document.proto";
import "google/firestore/v1beta1/query.proto";
import "google/firestore/v1beta1/write.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore";
option java_multiple_files = true;
option java_outer_classname = "FirestoreProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";
// The Cloud Firestore service.
//
// This service exposes several types of comparable timestamps:
//
// * `create_time` - The time at which a document was created. Changes only
// when a document is deleted, then re-created. Increases in a strict
// monotonic fashion.
// * `update_time` - The time at which a document was last updated. Changes
// every time a document is modified. Does not change when a write results
// in no modifications. Increases in a strict monotonic fashion.
// * `read_time` - The time at which a particular state was observed. Used
// to denote a consistent snapshot of the database or the time at which a
// Document was observed to not exist.
// * `commit_time` - The time at which the writes in a transaction were
// committed. Any read with an equal or greater `read_time` is guaranteed
// to see the effects of the transaction.
service Firestore {
option (google.api.default_host) = "firestore.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform,"
"https://www.googleapis.com/auth/datastore";
// Gets a single document.
rpc GetDocument(GetDocumentRequest) returns (Document) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/databases/*/documents/*/**}"
};
}
// Lists documents.
rpc ListDocuments(ListDocumentsRequest) returns (ListDocumentsResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
};
}
// Creates a new document.
rpc CreateDocument(CreateDocumentRequest) returns (Document) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/databases/*/documents/**}/{collection_id}"
body: "document"
};
}
// Updates or inserts a document.
rpc UpdateDocument(UpdateDocumentRequest) returns (Document) {
option (google.api.http) = {
patch: "/v1beta1/{document.name=projects/*/databases/*/documents/*/**}"
body: "document"
};
option (google.api.method_signature) = "document,update_mask";
}
// Deletes a document.
rpc DeleteDocument(DeleteDocumentRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/databases/*/documents/*/**}"
};
option (google.api.method_signature) = "name";
}
// Gets multiple documents (server-streaming).
//
// Documents returned by this method are not guaranteed to be returned in the
// same order that they were requested.
rpc BatchGetDocuments(BatchGetDocumentsRequest) returns (stream BatchGetDocumentsResponse) {
option (google.api.http) = {
post: "/v1beta1/{database=projects/*/databases/*}/documents:batchGet"
body: "*"
};
}
// Starts a new transaction.
rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
option (google.api.http) = {
post: "/v1beta1/{database=projects/*/databases/*}/documents:beginTransaction"
body: "*"
};
option (google.api.method_signature) = "database";
}
// Commits a transaction, while optionally updating documents.
rpc Commit(CommitRequest) returns (CommitResponse) {
option (google.api.http) = {
post: "/v1beta1/{database=projects/*/databases/*}/documents:commit"
body: "*"
};
option (google.api.method_signature) = "database,writes";
}
// Rolls back a transaction.
rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/v1beta1/{database=projects/*/databases/*}/documents:rollback"
body: "*"
};
option (google.api.method_signature) = "database,transaction";
}
// Runs a query (server-streaming).
rpc RunQuery(RunQueryRequest) returns (stream RunQueryResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/databases/*/documents}:runQuery"
body: "*"
additional_bindings {
post: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}:runQuery"
body: "*"
}
};
}
// Streams batches of document updates and deletes, in order
// (bidirectional streaming).
rpc Write(stream WriteRequest) returns (stream WriteResponse) {
option (google.api.http) = {
post: "/v1beta1/{database=projects/*/databases/*}/documents:write"
body: "*"
};
}
// Listens to changes (bidirectional streaming).
rpc Listen(stream ListenRequest) returns (stream ListenResponse) {
option (google.api.http) = {
post: "/v1beta1/{database=projects/*/databases/*}/documents:listen"
body: "*"
};
}
// Lists all the collection IDs underneath a document.
rpc ListCollectionIds(ListCollectionIdsRequest) returns (ListCollectionIdsResponse) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/databases/*/documents}:listCollectionIds"
body: "*"
additional_bindings {
post: "/v1beta1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds"
body: "*"
}
};
option (google.api.method_signature) = "parent";
}
}
// The request for [Firestore.GetDocument][google.firestore.v1beta1.Firestore.GetDocument].
message GetDocumentRequest {
// Required. The resource name of the Document to get. In the format:
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
string name = 1 [(google.api.field_behavior) = REQUIRED];
// The fields to return. If not set, returns all fields.
//
// If the document has a field that is not present in this mask, that field
// will not be returned in the response.
DocumentMask mask = 2;
// The consistency mode for this read request.
// If not set, defaults to strong consistency.
oneof consistency_selector {
// Reads the document in a transaction.
bytes transaction = 3;
// Reads the version of the document at the given time.
// This may not be older than 60 seconds.
google.protobuf.Timestamp read_time = 5;
}
}
// The request for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments].
message ListDocumentsRequest {
// Required. The parent resource name. In the format:
// `projects/{project_id}/databases/{database_id}/documents` or
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
// For example:
// `projects/my-project/databases/my-database/documents` or
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
string parent = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`
// or `messages`.
string collection_id = 2 [(google.api.field_behavior) = REQUIRED];
// The maximum number of documents to return.
int32 page_size = 3;
// The `next_page_token` value returned from a previous List request, if any.
string page_token = 4;
// The order to sort results by. For example: `priority desc, name`.
string order_by = 6;
// The fields to return. If not set, returns all fields.
//
// If a document has a field that is not present in this mask, that field
// will not be returned in the response.
DocumentMask mask = 7;
// The consistency mode for this read request.
// If not set, defaults to strong consistency.
oneof consistency_selector {
// Reads documents in a transaction.
bytes transaction = 8;
// Reads documents as they were at the given time.
// This may not be older than 60 seconds.
google.protobuf.Timestamp read_time = 10;
}
// If the list should show missing documents. A missing document is a
// document that does not exist but has sub-documents. These documents will
// be returned with a key but will not have fields, [Document.create_time][google.firestore.v1beta1.Document.create_time],
// or [Document.update_time][google.firestore.v1beta1.Document.update_time] set.
//
// Requests with `show_missing` may not specify `where` or
// `order_by`.
bool show_missing = 12;
}
// The response for [Firestore.ListDocuments][google.firestore.v1beta1.Firestore.ListDocuments].
message ListDocumentsResponse {
// The Documents found.
repeated Document documents = 1;
// A page token that may be used to continue the list, by passing it as the
// `page_token` of a subsequent request.
string next_page_token = 2;
}
// The request for [Firestore.CreateDocument][google.firestore.v1beta1.Firestore.CreateDocument].
message CreateDocumentRequest {
// Required. The parent resource. For example:
// `projects/{project_id}/databases/{database_id}/documents` or
// `projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}`
string parent = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The collection ID, relative to `parent`, in which to create the
// document. For example: `chatrooms`.
string collection_id = 2 [(google.api.field_behavior) = REQUIRED];
// The client-assigned document ID to use for this document.
//
// Optional. If not specified, an ID will be assigned by the service.
string document_id = 3;
// Required. The document to create. `name` must not be set.
Document document = 4 [(google.api.field_behavior) = REQUIRED];
// The fields to return. If not set, returns all fields.
//
// If the document has a field that is not present in this mask, that field
// will not be returned in the response.
DocumentMask mask = 5;
}
// The request for [Firestore.UpdateDocument][google.firestore.v1beta1.Firestore.UpdateDocument].
message UpdateDocumentRequest {
// Required. The updated document.
// Creates the document if it does not already exist.
Document document = 1 [(google.api.field_behavior) = REQUIRED];
// The fields to update.
// None of the field paths in the mask may contain a reserved name.
//
// If the document exists on the server and has fields not referenced in the
// mask, they are left unchanged.
// Fields referenced in the mask, but not present in the input document, are
// deleted from the document on the server.
DocumentMask update_mask = 2;
// The fields to return. If not set, returns all fields.
//
// If the document has a field that is not present in this mask, that field
// will not be returned in the response.
DocumentMask mask = 3;
// An optional precondition on the document.
// The request will fail if this is set and not met by the target document.
Precondition current_document = 4;
}
// The request for [Firestore.DeleteDocument][google.firestore.v1beta1.Firestore.DeleteDocument].
message DeleteDocumentRequest {
// Required. The resource name of the Document to delete. In the format:
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
string name = 1 [(google.api.field_behavior) = REQUIRED];
// An optional precondition on the document.
// The request will fail if this is set and not met by the target document.
Precondition current_document = 2;
}
// The request for [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments].
message BatchGetDocumentsRequest {
// Required. The database name. In the format:
// `projects/{project_id}/databases/{database_id}`.
string database = 1 [(google.api.field_behavior) = REQUIRED];
// The names of the documents to retrieve. In the format:
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
// The request will fail if any of the documents is not a child resource of
// the given `database`. Duplicate names will be elided.
repeated string documents = 2;
// The fields to return. If not set, returns all fields.
//
// If a document has a field that is not present in this mask, that field will
// not be returned in the response.
DocumentMask mask = 3;
// The consistency mode for this read request.
// If not set, defaults to strong consistency.
oneof consistency_selector {
// Reads documents in a transaction.
bytes transaction = 4;
// Starts a new transaction and reads the documents.
// Defaults to a read-only transaction.
// The new transaction ID will be returned as the first response in the
// stream.
TransactionOptions new_transaction = 5;
// Reads documents as they were at the given time.
// This may not be older than 60 seconds.
google.protobuf.Timestamp read_time = 7;
}
}
// The streamed response for [Firestore.BatchGetDocuments][google.firestore.v1beta1.Firestore.BatchGetDocuments].
message BatchGetDocumentsResponse {
// A single result.
// This can be empty if the server is just returning a transaction.
oneof result {
// A document that was requested.
Document found = 1;
// A document name that was requested but does not exist. In the format:
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
string missing = 2;
}
// The transaction that was started as part of this request.
// Will only be set in the first response, and only if
// [BatchGetDocumentsRequest.new_transaction][google.firestore.v1beta1.BatchGetDocumentsRequest.new_transaction] was set in the request.
bytes transaction = 3;
// The time at which the document was read.
// This may be monotonically increasing; in this case, the previous documents
// in the result stream are guaranteed not to have changed between their
// read_time and this one.
google.protobuf.Timestamp read_time = 4;
}
// The request for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction].
message BeginTransactionRequest {
// Required. The database name. In the format:
// `projects/{project_id}/databases/{database_id}`.
string database = 1 [(google.api.field_behavior) = REQUIRED];
// The options for the transaction.
// Defaults to a read-write transaction.
TransactionOptions options = 2;
}
// The response for [Firestore.BeginTransaction][google.firestore.v1beta1.Firestore.BeginTransaction].
message BeginTransactionResponse {
// The transaction ID of the transaction that was started.
bytes transaction = 1;
}
// The request for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit].
message CommitRequest {
// Required. The database name. In the format:
// `projects/{project_id}/databases/{database_id}`.
string database = 1 [(google.api.field_behavior) = REQUIRED];
// The writes to apply.
//
// Always executed atomically and in order.
repeated Write writes = 2;
// If set, applies all writes in this transaction, and commits it.
bytes transaction = 3;
}
// The response for [Firestore.Commit][google.firestore.v1beta1.Firestore.Commit].
message CommitResponse {
// The result of applying the writes.
//
// The i-th write result corresponds to the i-th write in the
// request.
repeated WriteResult write_results = 1;
// The time at which the commit occurred.
google.protobuf.Timestamp commit_time = 2;
}
// The request for [Firestore.Rollback][google.firestore.v1beta1.Firestore.Rollback].
message RollbackRequest {
// Required. The database name. In the format:
// `projects/{project_id}/databases/{database_id}`.
string database = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The ID of the transaction to roll back.
bytes transaction = 2 [(google.api.field_behavior) = REQUIRED];
}
// The request for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery].
message RunQueryRequest {
// Required. The parent resource name. In the format:
// `projects/{project_id}/databases/{database_id}/documents` or
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
// For example:
// `projects/my-project/databases/my-database/documents` or
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
string parent = 1 [(google.api.field_behavior) = REQUIRED];
// The query to run.
oneof query_type {
// A structured query.
StructuredQuery structured_query = 2;
}
// The consistency mode for this read request.
// If not set, defaults to strong consistency.
oneof consistency_selector {
// Reads documents in a transaction.
bytes transaction = 5;
// Starts a new transaction and reads the documents.
// Defaults to a read-only transaction.
// The new transaction ID will be returned as the first response in the
// stream.
TransactionOptions new_transaction = 6;
// Reads documents as they were at the given time.
// This may not be older than 60 seconds.
google.protobuf.Timestamp read_time = 7;
}
}
// The response for [Firestore.RunQuery][google.firestore.v1beta1.Firestore.RunQuery].
message RunQueryResponse {
// The transaction that was started as part of this request.
// Can only be set in the first response, and only if
// [RunQueryRequest.new_transaction][google.firestore.v1beta1.RunQueryRequest.new_transaction] was set in the request.
// If set, no other fields will be set in this response.
bytes transaction = 2;
// A query result.
// Not set when reporting partial progress.
Document document = 1;
// The time at which the document was read. This may be monotonically
// increasing; in this case, the previous documents in the result stream are
// guaranteed not to have changed between their `read_time` and this one.
//
// If the query returns no results, a response with `read_time` and no
// `document` will be sent, and this represents the time at which the query
// was run.
google.protobuf.Timestamp read_time = 3;
// The number of results that have been skipped due to an offset between
// the last response and the current response.
int32 skipped_results = 4;
}
// The request for [Firestore.Write][google.firestore.v1beta1.Firestore.Write].
//
// The first request creates a stream, or resumes an existing one from a token.
//
// When creating a new stream, the server replies with a response containing
// only an ID and a token, to use in the next request.
//
// When resuming a stream, the server first streams any responses later than the
// given token, then a response containing only an up-to-date token, to use in
// the next request.
message WriteRequest {
// Required. The database name. In the format:
// `projects/{project_id}/databases/{database_id}`.
// This is only required in the first message.
string database = 1 [(google.api.field_behavior) = REQUIRED];
// The ID of the write stream to resume.
// This may only be set in the first message. When left empty, a new write
// stream will be created.
string stream_id = 2;
// The writes to apply.
//
// Always executed atomically and in order.
// This must be empty on the first request.
// This may be empty on the last request.
// This must not be empty on all other requests.
repeated Write writes = 3;
// A stream token that was previously sent by the server.
//
// The client should set this field to the token from the most recent
// [WriteResponse][google.firestore.v1beta1.WriteResponse] it has received. This acknowledges that the client has
// received responses up to this token. After sending this token, earlier
// tokens may not be used anymore.
//
// The server may close the stream if there are too many unacknowledged
// responses.
//
// Leave this field unset when creating a new stream. To resume a stream at
// a specific point, set this field and the `stream_id` field.
bytes stream_token = 4;
// Labels associated with this write request.
map<string, string> labels = 5;
}
// The response for [Firestore.Write][google.firestore.v1beta1.Firestore.Write].
message WriteResponse {
// The ID of the stream.
// Only set on the first message, when a new stream was created.
string stream_id = 1;
// A token that represents the position of this response in the stream.
// This can be used by a client to resume the stream at this point.
//
// This field is always set.
bytes stream_token = 2;
// The result of applying the writes.
//
// The i-th write result corresponds to the i-th write in the
// request.
repeated WriteResult write_results = 3;
// The time at which the commit occurred.
google.protobuf.Timestamp commit_time = 4;
}
// A request for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen].
message ListenRequest {
// Required. The database name. In the format:
// `projects/{project_id}/databases/{database_id}`.
string database = 1 [(google.api.field_behavior) = REQUIRED];
// The supported target changes.
oneof target_change {
// A target to add to this stream.
Target add_target = 2;
// The ID of a target to remove from this stream.
int32 remove_target = 3;
}
// Labels associated with this target change.
map<string, string> labels = 4;
}
// The response for [Firestore.Listen][google.firestore.v1beta1.Firestore.Listen].
message ListenResponse {
// The supported responses.
oneof response_type {
// Targets have changed.
TargetChange target_change = 2;
// A [Document][google.firestore.v1beta1.Document] has changed.
DocumentChange document_change = 3;
// A [Document][google.firestore.v1beta1.Document] has been deleted.
DocumentDelete document_delete = 4;
// A [Document][google.firestore.v1beta1.Document] has been removed from a target (because it is no longer
// relevant to that target).
DocumentRemove document_remove = 6;
// A filter to apply to the set of documents previously returned for the
// given target.
//
// Returned when documents may have been removed from the given target, but
// the exact documents are unknown.
ExistenceFilter filter = 5;
}
}
// A specification of a set of documents to listen to.
message Target {
// A target specified by a set of document names.
message DocumentsTarget {
// The names of the documents to retrieve. In the format:
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
// The request will fail if any of the documents is not a child resource of
// the given `database`. Duplicate names will be elided.
repeated string documents = 2;
}
// A target specified by a query.
message QueryTarget {
// The parent resource name. In the format:
// `projects/{project_id}/databases/{database_id}/documents` or
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
// For example:
// `projects/my-project/databases/my-database/documents` or
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
string parent = 1;
// The query to run.
oneof query_type {
// A structured query.
StructuredQuery structured_query = 2;
}
}
// The type of target to listen to.
oneof target_type {
// A target specified by a query.
QueryTarget query = 2;
// A target specified by a set of document names.
DocumentsTarget documents = 3;
}
// When to start listening.
//
// If not specified, all matching Documents are returned before any
// subsequent changes.
oneof resume_type {
// A resume token from a prior [TargetChange][google.firestore.v1beta1.TargetChange] for an identical target.
//
// Using a resume token with a different target is unsupported and may fail.
bytes resume_token = 4;
// Start listening after a specific `read_time`.
//
// The client must know the state of matching documents at this time.
google.protobuf.Timestamp read_time = 11;
}
// The target ID that identifies the target on the stream. Must be a positive
// number and non-zero.
int32 target_id = 5;
// If the target should be removed once it is current and consistent.
bool once = 6;
}
// Targets being watched have changed.
message TargetChange {
// The type of change.
enum TargetChangeType {
// No change has occurred. Used only to send an updated `resume_token`.
NO_CHANGE = 0;
// The targets have been added.
ADD = 1;
// The targets have been removed.
REMOVE = 2;
// The targets reflect all changes committed before the targets were added
// to the stream.
//
// This will be sent after or with a `read_time` that is greater than or
// equal to the time at which the targets were added.
//
// Listeners can wait for this change if read-after-write semantics
// are desired.
CURRENT = 3;
// The targets have been reset, and a new initial state for the targets
// will be returned in subsequent changes.
//
// After the initial state is complete, `CURRENT` will be returned even
// if the target was previously indicated to be `CURRENT`.
RESET = 4;
}
// The type of change that occurred.
TargetChangeType target_change_type = 1;
// The target IDs of targets that have changed.
//
// If empty, the change applies to all targets.
//
// The order of the target IDs is not defined.
repeated int32 target_ids = 2;
// The error that resulted in this change, if applicable.
google.rpc.Status cause = 3;
// A token that can be used to resume the stream for the given `target_ids`,
// or all targets if `target_ids` is empty.
//
// Not set on every target change.
bytes resume_token = 4;
// The consistent `read_time` for the given `target_ids` (omitted when the
// target_ids are not at a consistent snapshot).
//
// The stream is guaranteed to send a `read_time` with `target_ids` empty
// whenever the entire stream reaches a new consistent snapshot. ADD,
// CURRENT, and RESET messages are guaranteed to (eventually) result in a
// new consistent snapshot (while NO_CHANGE and REMOVE messages are not).
//
// For a given stream, `read_time` is guaranteed to be monotonically
// increasing.
google.protobuf.Timestamp read_time = 6;
}
// The request for [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds].
message ListCollectionIdsRequest {
// Required. The parent document. In the format:
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
// For example:
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
string parent = 1 [(google.api.field_behavior) = REQUIRED];
// The maximum number of results to return.
int32 page_size = 2;
// A page token. Must be a value from
// [ListCollectionIdsResponse][google.firestore.v1beta1.ListCollectionIdsResponse].
string page_token = 3;
}
// The response from [Firestore.ListCollectionIds][google.firestore.v1beta1.Firestore.ListCollectionIds].
message ListCollectionIdsResponse {
// The collection IDs.
repeated string collection_ids = 1;
// A page token that may be used to continue the list.
string next_page_token = 2;
}
// ===========================================================================
// File boundary: google/firestore/admin/v1beta1/firestore_admin.proto
// (Cloud Firestore Admin API) begins below.
// ===========================================================================
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.admin.v1beta1;
import "google/api/annotations.proto";
import "google/firestore/admin/v1beta1/index.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin";
option java_multiple_files = true;
option java_outer_classname = "FirestoreAdminProto";
option java_package = "com.google.firestore.admin.v1beta1";
option objc_class_prefix = "GCFS";
// The Cloud Firestore Admin API.
//
// This API provides several administrative services for Cloud Firestore.
//
// # Concepts
//
// Project, Database, Namespace, Collection, and Document are used as defined in
// the Google Cloud Firestore API.
//
// Operation: An Operation represents work being performed in the background.
//
//
// # Services
//
// ## Index
//
// The index service manages Cloud Firestore indexes.
//
// Index creation is performed asynchronously.
// An Operation resource is created for each such asynchronous operation.
// The state of the operation (including any errors encountered)
// may be queried via the Operation resource.
//
// ## Metadata
//
// Provides metadata and statistical information about data in Cloud Firestore.
// The data provided as part of this API may be stale.
//
// ## Operation
//
// The Operations collection provides a record of actions performed for the
// specified Project (including any Operations in progress). Operations are not
// created directly but through calls on other collections or resources.
//
// An Operation that is not yet done may be cancelled. The request to cancel is
// asynchronous and the Operation may continue to run for some time after the
// request to cancel is made.
//
// An Operation that is done may be deleted so that it is no longer listed as
// part of the Operation collection.
//
// Operations are created by service `FirestoreAdmin`, but are accessed via
// service `google.longrunning.Operations`.
service FirestoreAdmin {
// Creates the specified index.
// A newly created index's initial state is `CREATING`. On completion of the
// returned [google.longrunning.Operation][google.longrunning.Operation], the state will be `READY`.
// If the index already exists, the call will return an `ALREADY_EXISTS`
// status.
//
// During creation, the process could result in an error, in which case the
// index will move to the `ERROR` state. The process can be recovered by
// fixing the data that caused the error, removing the index with
// [delete][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex], then re-creating the index with
// [create][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex].
//
// Indexes with a single field cannot be created.
rpc CreateIndex(CreateIndexRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{parent=projects/*/databases/*}/indexes"
body: "index"
};
}
// Lists the indexes that match the specified filters.
rpc ListIndexes(ListIndexesRequest) returns (ListIndexesResponse) {
option (google.api.http) = {
get: "/v1beta1/{parent=projects/*/databases/*}/indexes"
};
}
// Gets an index.
rpc GetIndex(GetIndexRequest) returns (Index) {
option (google.api.http) = {
get: "/v1beta1/{name=projects/*/databases/*/indexes/*}"
};
}
// Deletes an index.
rpc DeleteIndex(DeleteIndexRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/v1beta1/{name=projects/*/databases/*/indexes/*}"
};
}
// Exports a copy of all or a subset of documents from Google Cloud Firestore
// to another storage system, such as Google Cloud Storage. Recent updates to
// documents may not be reflected in the export. The export occurs in the
// background and its progress can be monitored and managed via the
// Operation resource that is created. The output of an export may only be
// used once the associated operation is done. If an export operation is
// cancelled before completion it may leave partial data behind in Google
// Cloud Storage.
rpc ExportDocuments(ExportDocumentsRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/databases/*}:exportDocuments"
body: "*"
};
}
// Imports documents into Google Cloud Firestore. Existing documents with the
// same name are overwritten. The import occurs in the background and its
// progress can be monitored and managed via the Operation resource that is
// created. If an ImportDocuments operation is cancelled, it is possible
// that a subset of the data has already been imported to Cloud Firestore.
rpc ImportDocuments(ImportDocumentsRequest) returns (google.longrunning.Operation) {
option (google.api.http) = {
post: "/v1beta1/{name=projects/*/databases/*}:importDocuments"
body: "*"
};
}
}
// Metadata for index operations. This metadata populates
// the metadata field of [google.longrunning.Operation][google.longrunning.Operation].
message IndexOperationMetadata {
// The type of index operation.
enum OperationType {
// Unspecified. Never set by server.
OPERATION_TYPE_UNSPECIFIED = 0;
// The operation is creating the index. Initiated by a `CreateIndex` call.
CREATING_INDEX = 1;
}
// The time that work began on the operation.
google.protobuf.Timestamp start_time = 1;
// The time the operation ended, either successfully or otherwise. Unset if
// the operation is still active.
google.protobuf.Timestamp end_time = 2;
// The index resource that this operation is acting on. For example:
// `projects/{project_id}/databases/{database_id}/indexes/{index_id}`
string index = 3;
// The type of index operation.
OperationType operation_type = 4;
// True if the [google.longrunning.Operation][google.longrunning.Operation] was cancelled. If the
// cancellation is in progress, cancelled will be true but
// [google.longrunning.Operation.done][google.longrunning.Operation.done] will be false.
bool cancelled = 5;
// Progress of the existing operation, measured in number of documents.
Progress document_progress = 6;
}
// Measures the progress of a particular metric.
message Progress {
// An estimate of how much work has been completed. Note that this may be
// greater than `work_estimated`.
int64 work_completed = 1;
// An estimate of how much work needs to be performed. Zero if the
// work estimate is unavailable. May change as work progresses.
int64 work_estimated = 2;
}
// The request for [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta1.FirestoreAdmin.CreateIndex].
message CreateIndexRequest {
// The name of the database this index will apply to. For example:
// `projects/{project_id}/databases/{database_id}`
string parent = 1;
// The index to create. The name and state fields are output only and will be
// ignored. Certain single field indexes cannot be created or deleted.
Index index = 2;
}
// The request for [FirestoreAdmin.GetIndex][google.firestore.admin.v1beta1.FirestoreAdmin.GetIndex].
message GetIndexRequest {
// The name of the index to retrieve. For example:
// `projects/{project_id}/databases/{database_id}/indexes/{index_id}`
string name = 1;
}
// The request for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes].
message ListIndexesRequest {
// The database name. For example:
// `projects/{project_id}/databases/{database_id}`
string parent = 1;
// The standard List filter.
string filter = 2;
// The standard List page size.
int32 page_size = 3;
// The standard List page token.
string page_token = 4;
}
// The request for [FirestoreAdmin.DeleteIndex][google.firestore.admin.v1beta1.FirestoreAdmin.DeleteIndex].
message DeleteIndexRequest {
  // The index name. For example:
  // `projects/{project_id}/databases/{database_id}/indexes/{index_id}`
  string name = 1;
}
// The response for [FirestoreAdmin.ListIndexes][google.firestore.admin.v1beta1.FirestoreAdmin.ListIndexes].
message ListIndexesResponse {
  // The indexes.
  repeated Index indexes = 1;
  // The standard List next-page token. Empty when there are no further pages.
  string next_page_token = 2;
}
// The request for [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ExportDocuments].
message ExportDocumentsRequest {
  // Database to export. Should be of the form:
  // `projects/{project_id}/databases/{database_id}`.
  string name = 1;
  // Which collection ids to export. Unspecified means all collections.
  // NOTE: field number 2 is intentionally unused here.
  repeated string collection_ids = 3;
  // The output URI. Currently only supports Google Cloud Storage URIs of the
  // form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name
  // of the Google Cloud Storage bucket and `NAMESPACE_PATH` is an optional
  // Google Cloud Storage namespace path. When
  // choosing a name, be sure to consider Google Cloud Storage naming
  // guidelines: https://cloud.google.com/storage/docs/naming.
  // If the URI is a bucket (without a namespace path), a prefix will be
  // generated based on the start time.
  string output_uri_prefix = 4;
}
// The request for [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta1.FirestoreAdmin.ImportDocuments].
message ImportDocumentsRequest {
  // Database to import into. Should be of the form:
  // `projects/{project_id}/databases/{database_id}`.
  string name = 1;
  // Which collection ids to import. Unspecified means all collections included
  // in the import.
  repeated string collection_ids = 3;
  // Location of the exported files.
  // This must match the output_uri_prefix of an ExportDocumentsResponse from
  // an export that has completed successfully.
  // See:
  // [google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix][google.firestore.admin.v1beta1.ExportDocumentsResponse.output_uri_prefix].
  string input_uri_prefix = 4;
}
// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field.
message ExportDocumentsResponse {
  // Location of the output files. This can be used to begin an import
  // into Cloud Firestore (this project or another project) after the operation
  // completes successfully.
  string output_uri_prefix = 1;
}
// Metadata for ExportDocuments operations.
// Attached to the [google.longrunning.Operation] tracking the export.
message ExportDocumentsMetadata {
  // The time that work began on the operation.
  google.protobuf.Timestamp start_time = 1;
  // The time the operation ended, either successfully or otherwise. Unset if
  // the operation is still active.
  google.protobuf.Timestamp end_time = 2;
  // The state of the export operation.
  OperationState operation_state = 3;
  // An estimate of the number of documents processed.
  Progress progress_documents = 4;
  // An estimate of the number of bytes processed.
  Progress progress_bytes = 5;
  // Which collection ids are being exported.
  repeated string collection_ids = 6;
  // Where the entities are being exported to.
  string output_uri_prefix = 7;
}
// Metadata for ImportDocuments operations.
// Mirrors ExportDocumentsMetadata, with the URI pointing at the import source.
message ImportDocumentsMetadata {
  // The time that work began on the operation.
  google.protobuf.Timestamp start_time = 1;
  // The time the operation ended, either successfully or otherwise. Unset if
  // the operation is still active.
  google.protobuf.Timestamp end_time = 2;
  // The state of the import operation.
  OperationState operation_state = 3;
  // An estimate of the number of documents processed.
  Progress progress_documents = 4;
  // An estimate of the number of bytes processed.
  Progress progress_bytes = 5;
  // Which collection ids are being imported.
  repeated string collection_ids = 6;
  // The location of the documents being imported.
  string input_uri_prefix = 7;
}
// The various possible states for an ongoing Operation.
message OperationState is consumed by Export/ImportDocumentsMetadata above.
enum OperationState {
  // Unspecified.
  STATE_UNSPECIFIED = 0;
  // Request is being prepared for processing.
  INITIALIZING = 1;
  // Request is actively being processed.
  PROCESSING = 2;
  // Request is in the process of being cancelled after user called
  // google.longrunning.Operations.CancelOperation on the operation.
  CANCELLING = 3;
  // Request has been processed and is in its finalization stage.
  FINALIZING = 4;
  // Request has completed successfully.
  SUCCESSFUL = 5;
  // Request has finished being processed, but encountered an error.
  FAILED = 6;
  // Request has finished being cancelled after user called
  // google.longrunning.Operations.CancelOperation.
  CANCELLED = 7;
}

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,669 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.cloud.firestore_v1beta1.proto import (
document_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2,
)
from google.cloud.firestore_v1beta1.proto import (
firestore_pb2 as google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
# Generated gRPC client stub — do not hand-edit the RPC wiring; regenerate
# from firestore.proto instead. Comments below are review annotations only.
class FirestoreStub(object):
    """Specification of the Firestore API.
    The Cloud Firestore service.
    This service exposes several types of comparable timestamps:
    * `create_time` - The time at which a document was created. Changes only
    when a document is deleted, then re-created. Increases in a strict
    monotonic fashion.
    * `update_time` - The time at which a document was last updated. Changes
    every time a document is modified. Does not change when a write results
    in no modifications. Increases in a strict monotonic fashion.
    * `read_time` - The time at which a particular state was observed. Used
    to denote a consistent snapshot of the database or the time at which a
    Document was observed to not exist.
    * `commit_time` - The time at which the writes in a transaction were
    committed. Any read with an equal or greater `read_time` is guaranteed
    to see the effects of the transaction.
    """
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Each attribute is a callable for one RPC. The channel factory used
        # (unary_unary / unary_stream / stream_stream) mirrors the RPC's
        # streaming shape declared in firestore.proto.
        self.GetDocument = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/GetDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.GetDocumentRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString,
        )
        self.ListDocuments = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/ListDocuments",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.FromString,
        )
        self.CreateDocument = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/CreateDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString,
        )
        self.UpdateDocument = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/UpdateDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString,
        )
        self.DeleteDocument = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/DeleteDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        # Server-streaming: the call returns an iterator of responses.
        self.BatchGetDocuments = channel.unary_stream(
            "/google.firestore.v1beta1.Firestore/BatchGetDocuments",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.FromString,
        )
        self.BeginTransaction = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/BeginTransaction",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.FromString,
        )
        self.Commit = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/Commit",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitResponse.FromString,
        )
        self.Rollback = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/Rollback",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RollbackRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.RunQuery = channel.unary_stream(
            "/google.firestore.v1beta1.Firestore/RunQuery",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryResponse.FromString,
        )
        # Bidirectional-streaming: caller supplies a request iterator and
        # receives a response iterator.
        self.Write = channel.stream_stream(
            "/google.firestore.v1beta1.Firestore/Write",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteResponse.FromString,
        )
        self.Listen = channel.stream_stream(
            "/google.firestore.v1beta1.Firestore/Listen",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenResponse.FromString,
        )
        self.ListCollectionIds = channel.unary_unary(
            "/google.firestore.v1beta1.Firestore/ListCollectionIds",
            request_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.FromString,
        )
# Generated server-side base class: subclass it, override the RPCs you
# implement, and register the subclass with add_FirestoreServicer_to_server.
# Every default implementation aborts the RPC with UNIMPLEMENTED.
class FirestoreServicer(object):
    """Specification of the Firestore API.
    The Cloud Firestore service.
    This service exposes several types of comparable timestamps:
    * `create_time` - The time at which a document was created. Changes only
    when a document is deleted, then re-created. Increases in a strict
    monotonic fashion.
    * `update_time` - The time at which a document was last updated. Changes
    every time a document is modified. Does not change when a write results
    in no modifications. Increases in a strict monotonic fashion.
    * `read_time` - The time at which a particular state was observed. Used
    to denote a consistent snapshot of the database or the time at which a
    Document was observed to not exist.
    * `commit_time` - The time at which the writes in a transaction were
    committed. Any read with an equal or greater `read_time` is guaranteed
    to see the effects of the transaction.
    """
    def GetDocument(self, request, context):
        """Gets a single document.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def ListDocuments(self, request, context):
        """Lists documents.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def CreateDocument(self, request, context):
        """Creates a new document.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def UpdateDocument(self, request, context):
        """Updates or inserts a document.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def DeleteDocument(self, request, context):
        """Deletes a document.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def BatchGetDocuments(self, request, context):
        """Gets multiple documents.
        Documents returned by this method are not guaranteed to be returned in the
        same order that they were requested.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def BeginTransaction(self, request, context):
        """Starts a new transaction.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def Commit(self, request, context):
        """Commits a transaction, while optionally updating documents.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def Rollback(self, request, context):
        """Rolls back a transaction.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def RunQuery(self, request, context):
        """Runs a query.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def Write(self, request_iterator, context):
        """Streams batches of document updates and deletes, in order.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def Listen(self, request_iterator, context):
        """Listens to changes.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
    def ListCollectionIds(self, request, context):
        """Lists all the collection IDs underneath a document.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_FirestoreServicer_to_server(servicer, server):
    """Register a FirestoreServicer implementation with a grpc server.

    Builds one RPC method handler per Firestore RPC (the handler kind —
    unary_unary / unary_stream / stream_stream — matches each RPC's
    streaming shape) and attaches them to *server* under the fully
    qualified service name ``google.firestore.v1beta1.Firestore``.

    Args:
        servicer: An object implementing the FirestoreServicer interface.
        server: The ``grpc.Server`` to attach the handlers to.
    """
    rpc_method_handlers = {
        "GetDocument": grpc.unary_unary_rpc_method_handler(
            servicer.GetDocument,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.GetDocumentRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.SerializeToString,
        ),
        "ListDocuments": grpc.unary_unary_rpc_method_handler(
            servicer.ListDocuments,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.SerializeToString,
        ),
        "CreateDocument": grpc.unary_unary_rpc_method_handler(
            servicer.CreateDocument,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.SerializeToString,
        ),
        "UpdateDocument": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateDocument,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.SerializeToString,
        ),
        "DeleteDocument": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteDocument,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "BatchGetDocuments": grpc.unary_stream_rpc_method_handler(
            servicer.BatchGetDocuments,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.SerializeToString,
        ),
        "BeginTransaction": grpc.unary_unary_rpc_method_handler(
            servicer.BeginTransaction,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.SerializeToString,
        ),
        "Commit": grpc.unary_unary_rpc_method_handler(
            servicer.Commit,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitResponse.SerializeToString,
        ),
        "Rollback": grpc.unary_unary_rpc_method_handler(
            servicer.Rollback,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RollbackRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        "RunQuery": grpc.unary_stream_rpc_method_handler(
            servicer.RunQuery,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryResponse.SerializeToString,
        ),
        "Write": grpc.stream_stream_rpc_method_handler(
            servicer.Write,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteResponse.SerializeToString,
        ),
        "Listen": grpc.stream_stream_rpc_method_handler(
            servicer.Listen,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenResponse.SerializeToString,
        ),
        "ListCollectionIds": grpc.unary_unary_rpc_method_handler(
            servicer.ListCollectionIds,
            request_deserializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.FromString,
            response_serializer=google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.firestore.v1beta1.Firestore", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
# Each static method below invokes one Firestore RPC through the
# channel-less grpc.experimental API: a fresh call is made against *target*
# with the given credentials/options, so no long-lived Stub is required.
# All methods share the same parameter list, which is forwarded verbatim
# to grpc.experimental.unary_unary / unary_stream / stream_stream.
class Firestore(object):
    """Specification of the Firestore API.
    The Cloud Firestore service.
    This service exposes several types of comparable timestamps:
    * `create_time` - The time at which a document was created. Changes only
    when a document is deleted, then re-created. Increases in a strict
    monotonic fashion.
    * `update_time` - The time at which a document was last updated. Changes
    every time a document is modified. Does not change when a write results
    in no modifications. Increases in a strict monotonic fashion.
    * `read_time` - The time at which a particular state was observed. Used
    to denote a consistent snapshot of the database or the time at which a
    Document was observed to not exist.
    * `commit_time` - The time at which the writes in a transaction were
    committed. Any read with an equal or greater `read_time` is guaranteed
    to see the effects of the transaction.
    """
    @staticmethod
    def GetDocument(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Gets a single document (unary-unary)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/GetDocument",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.GetDocumentRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def ListDocuments(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Lists documents (unary-unary)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/ListDocuments",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def CreateDocument(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Creates a new document (unary-unary)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/CreateDocument",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def UpdateDocument(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Updates or inserts a document (unary-unary)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/UpdateDocument",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_document__pb2.Document.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def DeleteDocument(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Deletes a document (unary-unary; response is Empty)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/DeleteDocument",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def BatchGetDocuments(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Gets multiple documents (unary-stream; returns a response iterator)."""
        return grpc.experimental.unary_stream(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/BatchGetDocuments",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def BeginTransaction(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Starts a new transaction (unary-unary)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/BeginTransaction",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def Commit(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Commits a transaction, optionally updating documents (unary-unary)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/Commit",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.CommitResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def Rollback(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Rolls back a transaction (unary-unary; response is Empty)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/Rollback",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RollbackRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def RunQuery(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Runs a query (unary-stream; returns a response iterator)."""
        return grpc.experimental.unary_stream(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/RunQuery",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.RunQueryResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def Write(
        request_iterator,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Streams batches of document updates and deletes (bidirectional)."""
        return grpc.experimental.stream_stream(
            request_iterator,
            target,
            "/google.firestore.v1beta1.Firestore/Write",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.WriteResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def Listen(
        request_iterator,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Listens to changes (bidirectional streaming)."""
        return grpc.experimental.stream_stream(
            request_iterator,
            target,
            "/google.firestore.v1beta1.Firestore/Listen",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListenResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )
    @staticmethod
    def ListCollectionIds(
        request,
        target,
        options=(),
        channel_credentials=None,
        call_credentials=None,
        compression=None,
        wait_for_ready=None,
        timeout=None,
        metadata=None,
    ):
        """Lists collection IDs underneath a document (unary-unary)."""
        return grpc.experimental.unary_unary(
            request,
            target,
            "/google.firestore.v1beta1.Firestore/ListCollectionIds",
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.SerializeToString,
            google_dot_cloud_dot_firestore__v1beta1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.FromString,
            options,
            channel_credentials,
            call_credentials,
            compression,
            wait_for_ready,
            timeout,
            metadata,
        )

View file

@ -0,0 +1,102 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.admin.v1beta1;
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin";
option java_multiple_files = true;
option java_outer_classname = "IndexProto";
option java_package = "com.google.firestore.admin.v1beta1";
option objc_class_prefix = "GCFS";
// A field of an index.
message IndexField {
  // The mode determines how a field is indexed.
  // NOTE: value 1 is intentionally unused; numbering starts at 2.
  enum Mode {
    // The mode is unspecified.
    MODE_UNSPECIFIED = 0;
    // The field's values are indexed so as to support sequencing in
    // ascending order and also query by <, >, <=, >=, and =.
    ASCENDING = 2;
    // The field's values are indexed so as to support sequencing in
    // descending order and also query by <, >, <=, >=, and =.
    DESCENDING = 3;
    // The field's array values are indexed so as to support membership using
    // ARRAY_CONTAINS queries.
    ARRAY_CONTAINS = 4;
  }
  // The path of the field. Must match the field path specification described
  // by [google.firestore.v1beta1.Document.fields][fields].
  // Special field path `__name__` may be used by itself or at the end of a
  // path. `__type__` may be used only at the end of path.
  string field_path = 1;
  // The field's mode.
  Mode mode = 2;
}
// An index definition.
message Index {
  // The state of an index. During index creation, an index will be in the
  // `CREATING` state. If the index is created successfully, it will transition
  // to the `READY` state. If the index is not able to be created, it will
  // transition to the `ERROR` state.
  // NOTE: enum values are not sequential (1 and 4 are unused).
  enum State {
    // The state is unspecified.
    STATE_UNSPECIFIED = 0;
    // The index is being created.
    // There is an active long-running operation for the index.
    // The index is updated when writing a document.
    // Some index data may exist.
    CREATING = 3;
    // The index is ready to be used.
    // The index is updated when writing a document.
    // The index is fully populated from all stored documents it applies to.
    READY = 2;
    // The index was being created, but something went wrong.
    // There is no active long-running operation for the index,
    // and the most recently finished long-running operation failed.
    // The index is not updated when writing a document.
    // Some index data may exist.
    ERROR = 5;
  }
  // The resource name of the index.
  // Output only.
  string name = 1;
  // The collection ID to which this index applies. Required.
  string collection_id = 2;
  // The fields to index.
  repeated IndexField fields = 3;
  // The state of the index.
  // Output only.
  State state = 6;
}

View file

@ -0,0 +1,34 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.admin.v1beta1;
import "google/api/annotations.proto";
import "google/type/latlng.proto";
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta1;admin";
option java_multiple_files = true;
option java_outer_classname = "LocationProto";
option java_package = "com.google.firestore.admin.v1beta1";
option objc_class_prefix = "GCFS";
// The metadata message for [google.cloud.location.Location.metadata][google.cloud.location.Location.metadata].
// Intentionally empty: reserved so location responses can carry
// Firestore-specific metadata in the future without a breaking change.
message LocationMetadata {
}

View file

@ -0,0 +1,203 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.admin.v1beta2;
import "google/api/annotations.proto";
import "google/firestore/admin/v1beta2/index.proto";
import "google/protobuf/timestamp.proto";
option csharp_namespace = "Google.Cloud.Firestore.Admin.V1Beta2";
option go_package = "google.golang.org/genproto/googleapis/firestore/admin/v1beta2;admin";
option java_multiple_files = true;
option java_outer_classname = "OperationProto";
option java_package = "com.google.firestore.admin.v1beta2";
option objc_class_prefix = "GCFS";
// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from
// [FirestoreAdmin.CreateIndex][google.firestore.admin.v1beta2.FirestoreAdmin.CreateIndex].
message IndexOperationMetadata {
  // The time this operation started.
  google.protobuf.Timestamp start_time = 1;

  // The time this operation completed. Will be unset if operation still in
  // progress.
  google.protobuf.Timestamp end_time = 2;

  // The index resource that this operation is acting on. For example:
  // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/indexes/{index_id}`
  string index = 3;

  // The state of the operation. Shared with the other admin long-running
  // operations; see the `OperationState` enum in this file.
  OperationState state = 4;

  // The progress, in documents, of this operation.
  Progress progress_documents = 5;

  // The progress, in bytes, of this operation.
  Progress progress_bytes = 6;
}
// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from
// [FirestoreAdmin.UpdateField][google.firestore.admin.v1beta2.FirestoreAdmin.UpdateField].
message FieldOperationMetadata {
  // Information about an index configuration change.
  message IndexConfigDelta {
    // Specifies how the index is changing.
    enum ChangeType {
      // The type of change is not specified or known.
      CHANGE_TYPE_UNSPECIFIED = 0;

      // The single field index is being added.
      ADD = 1;

      // The single field index is being removed.
      REMOVE = 2;
    }

    // Specifies how the index is changing.
    ChangeType change_type = 1;

    // The index being changed.
    Index index = 2;
  }

  // The time this operation started.
  google.protobuf.Timestamp start_time = 1;

  // The time this operation completed. Will be unset if operation still in
  // progress.
  google.protobuf.Timestamp end_time = 2;

  // The field resource that this operation is acting on. For example:
  // `projects/{project_id}/databases/{database_id}/collectionGroups/{collection_id}/fields/{field_path}`
  string field = 3;

  // A list of [IndexConfigDelta][google.firestore.admin.v1beta2.FieldOperationMetadata.IndexConfigDelta], which describe the intent of this
  // operation.
  repeated IndexConfigDelta index_config_deltas = 4;

  // The state of the operation.
  OperationState state = 5;

  // The progress, in documents, of this operation.
  Progress document_progress = 6;

  // The progress, in bytes, of this operation.
  Progress bytes_progress = 7;
}
// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from
// [FirestoreAdmin.ExportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ExportDocuments].
message ExportDocumentsMetadata {
  // The time this operation started.
  google.protobuf.Timestamp start_time = 1;

  // The time this operation completed. Will be unset if operation still in
  // progress.
  google.protobuf.Timestamp end_time = 2;

  // The state of the export operation.
  OperationState operation_state = 3;

  // The progress, in documents, of this operation.
  Progress progress_documents = 4;

  // The progress, in bytes, of this operation.
  Progress progress_bytes = 5;

  // Which collection ids are being exported.
  repeated string collection_ids = 6;

  // Where the entities are being exported to.
  string output_uri_prefix = 7;
}
// Metadata for [google.longrunning.Operation][google.longrunning.Operation] results from
// [FirestoreAdmin.ImportDocuments][google.firestore.admin.v1beta2.FirestoreAdmin.ImportDocuments].
// Mirrors `ExportDocumentsMetadata`, with an input prefix instead of output.
message ImportDocumentsMetadata {
  // The time this operation started.
  google.protobuf.Timestamp start_time = 1;

  // The time this operation completed. Will be unset if operation still in
  // progress.
  google.protobuf.Timestamp end_time = 2;

  // The state of the import operation.
  OperationState operation_state = 3;

  // The progress, in documents, of this operation.
  Progress progress_documents = 4;

  // The progress, in bytes, of this operation.
  Progress progress_bytes = 5;

  // Which collection ids are being imported.
  repeated string collection_ids = 6;

  // The location of the documents being imported.
  string input_uri_prefix = 7;
}
// Returned in the [google.longrunning.Operation][google.longrunning.Operation] response field.
message ExportDocumentsResponse {
  // Location of the output files. This can be used to begin an import
  // into Cloud Firestore (this project or another project) after the operation
  // completes successfully.
  string output_uri_prefix = 1;
}
// Describes the progress of the operation.
// Unit of work is generic and must be interpreted based on where [Progress][google.firestore.admin.v1beta2.Progress]
// is used (e.g. documents for `progress_documents`, bytes for
// `progress_bytes`).
message Progress {
  // The amount of work estimated.
  int64 estimated_work = 1;

  // The amount of work completed.
  int64 completed_work = 2;
}
// Describes the state of the operation. Shared by all of the admin
// long-running-operation metadata messages in this file.
enum OperationState {
  // Unspecified.
  OPERATION_STATE_UNSPECIFIED = 0;

  // Request is being prepared for processing.
  INITIALIZING = 1;

  // Request is actively being processed.
  PROCESSING = 2;

  // Request is in the process of being cancelled after user called
  // google.longrunning.Operations.CancelOperation on the operation.
  CANCELLING = 3;

  // Request has been processed and is in its finalization stage.
  FINALIZING = 4;

  // Request has completed successfully.
  SUCCESSFUL = 5;

  // Request has finished being processed, but encountered an error.
  FAILED = 6;

  // Request has finished being cancelled after user called
  // google.longrunning.Operations.CancelOperation.
  CANCELLED = 7;
}

View file

@ -0,0 +1,244 @@
// Copyright 2019 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.v1beta1;
import "google/firestore/v1beta1/document.proto";
import "google/protobuf/wrappers.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore";
option java_multiple_files = true;
option java_outer_classname = "QueryProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";
// A Firestore query.
message StructuredQuery {
  // A selection of a collection, such as `messages as m1`.
  message CollectionSelector {
    // The collection ID.
    // When set, selects only collections with this ID.
    string collection_id = 2;

    // When false, selects only collections that are immediate children of
    // the `parent` specified in the containing `RunQueryRequest`.
    // When true, selects all descendant collections.
    bool all_descendants = 3;
  }

  // A filter.
  message Filter {
    // The type of filter.
    oneof filter_type {
      // A composite filter.
      CompositeFilter composite_filter = 1;

      // A filter on a document field.
      FieldFilter field_filter = 2;

      // A filter that takes exactly one argument.
      UnaryFilter unary_filter = 3;
    }
  }

  // A filter that merges multiple other filters using the given operator.
  message CompositeFilter {
    // A composite filter operator.
    enum Operator {
      // Unspecified. This value must not be used.
      OPERATOR_UNSPECIFIED = 0;

      // The results are required to satisfy each of the combined filters.
      // (`AND` is the only combining operator defined in this revision.)
      AND = 1;
    }

    // The operator for combining multiple filters.
    Operator op = 1;

    // The list of filters to combine.
    // Must contain at least one filter.
    repeated Filter filters = 2;
  }

  // A filter on a specific field.
  message FieldFilter {
    // A field filter operator.
    enum Operator {
      // Unspecified. This value must not be used.
      OPERATOR_UNSPECIFIED = 0;

      // Less than. Requires that the field come first in `order_by`.
      LESS_THAN = 1;

      // Less than or equal. Requires that the field come first in `order_by`.
      LESS_THAN_OR_EQUAL = 2;

      // Greater than. Requires that the field come first in `order_by`.
      GREATER_THAN = 3;

      // Greater than or equal. Requires that the field come first in
      // `order_by`.
      GREATER_THAN_OR_EQUAL = 4;

      // Equal.
      EQUAL = 5;

      // Contains. Requires that the field is an array.
      // (Value 6 is unused in this revision.)
      ARRAY_CONTAINS = 7;

      // In. Requires that `value` is a non-empty ArrayValue with at most 10
      // values.
      IN = 8;

      // Contains any. Requires that the field is an array and
      // `value` is a non-empty ArrayValue with at most 10 values.
      ARRAY_CONTAINS_ANY = 9;
    }

    // The field to filter by.
    FieldReference field = 1;

    // The operator to filter by.
    Operator op = 2;

    // The value to compare to.
    Value value = 3;
  }

  // A filter with a single operand.
  message UnaryFilter {
    // A unary operator.
    enum Operator {
      // Unspecified. This value must not be used.
      OPERATOR_UNSPECIFIED = 0;

      // Test if a field is equal to NaN.
      // (Value 1 is unused in this revision.)
      IS_NAN = 2;

      // Test if an expression evaluates to Null.
      IS_NULL = 3;
    }

    // The unary operator to apply.
    Operator op = 1;

    // The argument to the filter.
    oneof operand_type {
      // The field to which to apply the operator.
      FieldReference field = 2;
    }
  }

  // An order on a field.
  message Order {
    // The field to order by.
    FieldReference field = 1;

    // The direction to order by. Defaults to `ASCENDING`.
    Direction direction = 2;
  }

  // A reference to a field, such as `max(messages.time) as max_time`.
  message FieldReference {
    string field_path = 2;
  }

  // The projection of document's fields to return.
  message Projection {
    // The fields to return.
    //
    // If empty, all fields are returned. To only return the name
    // of the document, use `['__name__']`.
    repeated FieldReference fields = 2;
  }

  // A sort direction.
  enum Direction {
    // Unspecified.
    DIRECTION_UNSPECIFIED = 0;

    // Ascending.
    ASCENDING = 1;

    // Descending.
    DESCENDING = 2;
  }

  // The projection to return.
  Projection select = 1;

  // The collections to query.
  repeated CollectionSelector from = 2;

  // The filter to apply.
  Filter where = 3;

  // The order to apply to the query results.
  //
  // Firestore guarantees a stable ordering through the following rules:
  //
  //  * Any field required to appear in `order_by`, that is not already
  //    specified in `order_by`, is appended to the order in field name order
  //    by default.
  //  * If an order on `__name__` is not specified, it is appended by default.
  //
  // Fields are appended with the same sort direction as the last order
  // specified, or 'ASCENDING' if no order was specified. For example:
  //
  //  * `SELECT * FROM Foo ORDER BY A` becomes
  //    `SELECT * FROM Foo ORDER BY A, __name__`
  //  * `SELECT * FROM Foo ORDER BY A DESC` becomes
  //    `SELECT * FROM Foo ORDER BY A DESC, __name__ DESC`
  //  * `SELECT * FROM Foo WHERE A > 1` becomes
  //    `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__`
  repeated Order order_by = 4;

  // A starting point for the query results.
  Cursor start_at = 7;

  // An end point for the query results.
  Cursor end_at = 8;

  // The number of results to skip.
  //
  // Applies before limit, but after all other constraints. Must be >= 0 if
  // specified.
  int32 offset = 6;

  // The maximum number of results to return.
  //
  // Applies after all other constraints.
  // Must be >= 0 if specified.
  google.protobuf.Int32Value limit = 5;
}
// A position in a query result set.
message Cursor {
  // The values that represent a position, in the order they appear in
  // the order by clause of a query.
  //
  // Can contain fewer values than specified in the order by clause.
  repeated Value values = 1;

  // If the position is just before or just after the given values, relative
  // to the sort order defined by the query.
  bool before = 2;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,3 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,255 @@
// Copyright 2019 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
syntax = "proto3";
package google.firestore.v1beta1;
import "google/firestore/v1beta1/common.proto";
import "google/firestore/v1beta1/document.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
option csharp_namespace = "Google.Cloud.Firestore.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/firestore/v1beta1;firestore";
option java_multiple_files = true;
option java_outer_classname = "WriteProto";
option java_package = "com.google.firestore.v1beta1";
option objc_class_prefix = "GCFS";
option php_namespace = "Google\\Cloud\\Firestore\\V1beta1";
option ruby_package = "Google::Cloud::Firestore::V1beta1";
// A write on a document.
message Write {
  // The operation to execute.
  // (Field number 5 is unused in this revision of the oneof.)
  oneof operation {
    // A document to write.
    Document update = 1;

    // A document name to delete. In the format:
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    string delete = 2;

    // Applies a transformation to a document.
    // At most one `transform` per document is allowed in a given request.
    // An `update` cannot follow a `transform` on the same document in a given
    // request.
    DocumentTransform transform = 6;
  }

  // The fields to update in this write.
  //
  // This field can be set only when the operation is `update`.
  // If the mask is not set for an `update` and the document exists, any
  // existing data will be overwritten.
  // If the mask is set and the document on the server has fields not covered by
  // the mask, they are left unchanged.
  // Fields referenced in the mask, but not present in the input document, are
  // deleted from the document on the server.
  // The field paths in this mask must not contain a reserved field name.
  DocumentMask update_mask = 3;

  // An optional precondition on the document.
  //
  // The write will fail if this is set and not met by the target document.
  Precondition current_document = 4;
}
// A transformation of a document.
message DocumentTransform {
  // A transformation of a field of the document.
  message FieldTransform {
    // A value that is calculated by the server.
    enum ServerValue {
      // Unspecified. This value must not be used.
      SERVER_VALUE_UNSPECIFIED = 0;

      // The time at which the server processed the request, with millisecond
      // precision.
      REQUEST_TIME = 1;
    }

    // The path of the field. See [Document.fields][google.firestore.v1beta1.Document.fields] for the field path syntax
    // reference.
    string field_path = 1;

    // The transformation to apply on the field.
    oneof transform_type {
      // Sets the field to the given server value.
      ServerValue set_to_server_value = 2;

      // Adds the given value to the field's current value.
      //
      // This must be an integer or a double value.
      // If the field is not an integer or double, or if the field does not yet
      // exist, the transformation will set the field to the given value.
      // If either of the given value or the current field value are doubles,
      // both values will be interpreted as doubles. Double arithmetic and
      // representation of double values follow IEEE 754 semantics.
      // If there is positive/negative integer overflow, the field is resolved
      // to the largest magnitude positive/negative integer.
      Value increment = 3;

      // Sets the field to the maximum of its current value and the given value.
      //
      // This must be an integer or a double value.
      // If the field is not an integer or double, or if the field does not yet
      // exist, the transformation will set the field to the given value.
      // If a maximum operation is applied where the field and the input value
      // are of mixed types (that is - one is an integer and one is a double)
      // the field takes on the type of the larger operand. If the operands are
      // equivalent (e.g. 3 and 3.0), the field does not change.
      // 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and
      // zero input value is always the stored value.
      // The maximum of any numeric value x and NaN is NaN.
      Value maximum = 4;

      // Sets the field to the minimum of its current value and the given value.
      //
      // This must be an integer or a double value.
      // If the field is not an integer or double, or if the field does not yet
      // exist, the transformation will set the field to the input value.
      // If a minimum operation is applied where the field and the input value
      // are of mixed types (that is - one is an integer and one is a double)
      // the field takes on the type of the smaller operand. If the operands are
      // equivalent (e.g. 3 and 3.0), the field does not change.
      // 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and
      // zero input value is always the stored value.
      // The minimum of any numeric value x and NaN is NaN.
      Value minimum = 5;

      // Append the given elements in order if they are not already present in
      // the current field value.
      // If the field is not an array, or if the field does not yet exist, it is
      // first set to the empty array.
      //
      // Equivalent numbers of different types (e.g. 3L and 3.0) are
      // considered equal when checking if a value is missing.
      // NaN is equal to NaN, and Null is equal to Null.
      // If the input contains multiple equivalent values, only the first will
      // be considered.
      //
      // The corresponding transform_result will be the null value.
      ArrayValue append_missing_elements = 6;

      // Remove all of the given elements from the array in the field.
      // If the field is not an array, or if the field does not yet exist, it is
      // set to the empty array.
      //
      // Equivalent numbers of the different types (e.g. 3L and 3.0) are
      // considered equal when deciding whether an element should be removed.
      // NaN is equal to NaN, and Null is equal to Null.
      // This will remove all equivalent values if there are duplicates.
      //
      // The corresponding transform_result will be the null value.
      ArrayValue remove_all_from_array = 7;
    }
  }

  // The name of the document to transform.
  string document = 1;

  // The list of transformations to apply to the fields of the document, in
  // order.
  // This must not be empty.
  repeated FieldTransform field_transforms = 2;
}
// The result of applying a write.
message WriteResult {
  // The last update time of the document after applying the write. Not set
  // after a `delete`.
  //
  // If the write did not actually change the document, this will be the
  // previous update_time.
  google.protobuf.Timestamp update_time = 1;

  // The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1beta1.DocumentTransform.FieldTransform], in the
  // same order.
  repeated Value transform_results = 2;
}
// A [Document][google.firestore.v1beta1.Document] has changed.
//
// May be the result of multiple [writes][google.firestore.v1beta1.Write], including deletes, that
// ultimately resulted in a new value for the [Document][google.firestore.v1beta1.Document].
//
// Multiple [DocumentChange][google.firestore.v1beta1.DocumentChange] messages may be returned for the same logical
// change, if multiple targets are affected.
message DocumentChange {
  // The new state of the [Document][google.firestore.v1beta1.Document].
  //
  // If `mask` is set, contains only fields that were updated or added.
  Document document = 1;

  // A set of target IDs of targets that match this document.
  repeated int32 target_ids = 5;

  // A set of target IDs for targets that no longer match this document.
  repeated int32 removed_target_ids = 6;
}
// A [Document][google.firestore.v1beta1.Document] has been deleted.
//
// May be the result of multiple [writes][google.firestore.v1beta1.Write], including updates, the
// last of which deleted the [Document][google.firestore.v1beta1.Document].
//
// Multiple [DocumentDelete][google.firestore.v1beta1.DocumentDelete] messages may be returned for the same logical
// delete, if multiple targets are affected.
message DocumentDelete {
  // The resource name of the [Document][google.firestore.v1beta1.Document] that was deleted.
  string document = 1;

  // A set of target IDs for targets that previously matched this entity.
  repeated int32 removed_target_ids = 6;

  // The read timestamp at which the delete was observed.
  //
  // Greater or equal to the `commit_time` of the delete.
  google.protobuf.Timestamp read_time = 4;
}
// A [Document][google.firestore.v1beta1.Document] has been removed from the view of the targets.
//
// Sent if the document is no longer relevant to a target and is out of view.
// Can be sent instead of a DocumentDelete or a DocumentChange if the server
// can not send the new value of the document.
//
// Multiple [DocumentRemove][google.firestore.v1beta1.DocumentRemove] messages may be returned for the same logical
// write or delete, if multiple targets are affected.
message DocumentRemove {
  // The resource name of the [Document][google.firestore.v1beta1.Document] that has gone out of view.
  string document = 1;

  // A set of target IDs for targets that previously matched this document.
  repeated int32 removed_target_ids = 2;

  // The read timestamp at which the remove was observed.
  //
  // Greater or equal to the `commit_time` of the change/delete/remove.
  google.protobuf.Timestamp read_time = 4;
}
// A digest of all the documents that match a given target.
message ExistenceFilter {
  // The target ID to which this filter applies.
  int32 target_id = 1;

  // The total count of documents that match [target_id][google.firestore.v1beta1.ExistenceFilter.target_id].
  //
  // If different from the count of documents in the client that match, the
  // client must manually determine which documents no longer match the target.
  int32 count = 2;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,3 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

View file

@ -0,0 +1,971 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for representing queries for the Google Cloud Firestore API.
A :class:`~google.cloud.firestore_v1beta1.query.Query` can be created directly
from a :class:`~google.cloud.firestore_v1beta1.collection.Collection`,
and that can be a more common way to create a query than direct usage of the
constructor.
"""
import copy
import math
import warnings
from google.protobuf import wrappers_pb2
import six
from google.cloud.firestore_v1beta1 import _helpers
from google.cloud.firestore_v1beta1 import document
from google.cloud.firestore_v1beta1 import field_path as field_path_module
from google.cloud.firestore_v1beta1 import transforms
from google.cloud.firestore_v1beta1.gapic import enums
from google.cloud.firestore_v1beta1.proto import query_pb2
from google.cloud.firestore_v1beta1.order import Order
from google.cloud.firestore_v1beta1.watch import Watch
# Operator string for equality; ``None``/NaN values in ``Query.where`` are
# only legal with this operator (they become unary filters).
_EQ_OP = "=="
# Mirrors ``google.firestore.v1beta1.StructuredQuery.FieldFilter.Operator``.
_operator_enum = enums.StructuredQuery.FieldFilter.Operator
# Maps the user-facing comparison strings accepted by ``Query.where`` onto
# the protobuf operator enum values.
# NOTE(review): the v1beta1 query proto also defines ``IN`` (8) and
# ``ARRAY_CONTAINS_ANY`` (9) operators that are not mapped here -- confirm
# whether the installed ``enums`` module exposes them before adding.
_COMPARISON_OPERATORS = {
    "<": _operator_enum.LESS_THAN,
    "<=": _operator_enum.LESS_THAN_OR_EQUAL,
    _EQ_OP: _operator_enum.EQUAL,
    ">=": _operator_enum.GREATER_THAN_OR_EQUAL,
    ">": _operator_enum.GREATER_THAN,
    "array_contains": _operator_enum.ARRAY_CONTAINS,
}
# Error-message templates raised by ``Query`` methods below.
_BAD_OP_STRING = "Operator string {!r} is invalid. Valid choices are: {}."
_BAD_OP_NAN_NULL = 'Only an equality filter ("==") can be used with None or NaN values'
_INVALID_WHERE_TRANSFORM = "Transforms cannot be used as where values."
_BAD_DIR_STRING = "Invalid direction {!r}. Must be one of {!r} or {!r}."
_INVALID_CURSOR_TRANSFORM = "Transforms cannot be used as cursor values."
_MISSING_ORDER_BY = (
    'The "order by" field path {!r} is not present in the cursor data {!r}. '
    "All fields sent to ``order_by()`` must be present in the fields "
    "if passed to one of ``start_at()`` / ``start_after()`` / "
    "``end_before()`` / ``end_at()`` to define a cursor."
)
_NO_ORDERS_FOR_CURSOR = (
    "Attempting to create a cursor with no fields to order on. "
    "When defining a cursor with one of ``start_at()`` / ``start_after()`` / "
    "``end_before()`` / ``end_at()``, all fields in the cursor must "
    "come from fields set in ``order_by()``."
)
_MISMATCH_CURSOR_W_ORDER_BY = "The cursor {!r} does not match the order fields {!r}."
class Query(object):
"""Represents a query to the Firestore API.
Instances of this class are considered immutable: all methods that
would modify an instance instead return a new instance.
Args:
parent (~.firestore_v1beta1.collection.Collection): The collection
that this query applies to.
projection (Optional[google.cloud.proto.firestore.v1beta1.\
query_pb2.StructuredQuery.Projection]): A projection of document
fields to limit the query results to.
field_filters (Optional[Tuple[google.cloud.proto.firestore.v1beta1.\
query_pb2.StructuredQuery.FieldFilter, ...]]): The filters to be
applied in the query.
orders (Optional[Tuple[google.cloud.proto.firestore.v1beta1.\
query_pb2.StructuredQuery.Order, ...]]): The "order by" entries
to use in the query.
limit (Optional[int]): The maximum number of documents the
query is allowed to return.
offset (Optional[int]): The number of results to skip.
start_at (Optional[Tuple[dict, bool]]): Two-tuple of
* a mapping of fields. Any field that is present in this mapping
must also be present in ``orders``
* an ``after`` flag
The fields and the flag combine to form a cursor used as
a starting point in a query result set. If the ``after``
flag is :data:`True`, the results will start just after any
documents which have fields matching the cursor, otherwise
any matching documents will be included in the result set.
When the query is formed, the document values
will be used in the order given by ``orders``.
end_at (Optional[Tuple[dict, bool]]): Two-tuple of
* a mapping of fields. Any field that is present in this mapping
must also be present in ``orders``
* a ``before`` flag
The fields and the flag combine to form a cursor used as
an ending point in a query result set. If the ``before``
flag is :data:`True`, the results will end just before any
documents which have fields matching the cursor, otherwise
any matching documents will be included in the result set.
When the query is formed, the document values
will be used in the order given by ``orders``.
"""
ASCENDING = "ASCENDING"
"""str: Sort query results in ascending order on a field."""
DESCENDING = "DESCENDING"
"""str: Sort query results in descending order on a field."""
def __init__(
    self,
    parent,
    projection=None,
    field_filters=(),
    orders=(),
    limit=None,
    offset=None,
    start_at=None,
    end_at=None,
):
    """Store the immutable pieces of the query (see class docstring)."""
    self._parent = parent  # collection this query runs against
    self._projection = projection  # Projection protobuf, or None for all fields
    self._field_filters = field_filters  # tuple of FieldFilter protobufs
    self._orders = orders  # tuple of Order protobufs ("order by" entries)
    self._limit = limit  # max number of results, or None
    self._offset = offset  # number of results to skip, or None
    self._start_at = start_at  # (fields, after-flag) cursor pair, or None
    self._end_at = end_at  # (fields, before-flag) cursor pair, or None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self._parent == other._parent
and self._projection == other._projection
and self._field_filters == other._field_filters
and self._orders == other._orders
and self._limit == other._limit
and self._offset == other._offset
and self._start_at == other._start_at
and self._end_at == other._end_at
)
@property
def _client(self):
"""The client of the parent collection.
Returns:
~.firestore_v1beta1.client.Client: The client that owns
this query.
"""
return self._parent._client
def select(self, field_paths):
    """Limit query results to a projection of document fields.

    See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`
    for more information on **field paths**. Any projection set by an
    earlier :meth:`select` call is replaced, not merged.

    Args:
        field_paths (Iterable[str, ...]): Field paths (``.``-delimited
            lists of field names) to project in the query results.

    Returns:
        ~.firestore_v1beta1.query.Query: A copy of this query carrying
        the new projection.

    Raises:
        ValueError: If any ``field_path`` is invalid.
    """
    paths = list(field_paths)
    # Validate every path before building protobufs; raises ValueError.
    for path in paths:
        field_path_module.split_field_path(path)
    references = []
    for path in paths:
        references.append(
            query_pb2.StructuredQuery.FieldReference(field_path=path)
        )
    projection = query_pb2.StructuredQuery.Projection(fields=references)
    return self.__class__(
        self._parent,
        projection=projection,
        field_filters=self._field_filters,
        orders=self._orders,
        limit=self._limit,
        offset=self._offset,
        start_at=self._start_at,
        end_at=self._end_at,
    )
def where(self, field_path, op_string, value):
    """Return a copy of this query with one more field filter.

    See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`
    for more information on **field paths**.

    Args:
        field_path (str): A field path (``.``-delimited list of
            field names) for the field to filter on.
        op_string (str): A comparison operation in the form of a string.
            Acceptable values are ``<``, ``<=``, ``==``, ``>=``, ``>``
            and ``array_contains``.
        value (Any): The value to compare the field against in the filter.
            If ``value`` is :data:`None` or a NaN, then ``==`` is the only
            allowed operation (and a unary filter is sent instead).

    Returns:
        ~.firestore_v1beta1.query.Query: A copy of this query carrying
        the additional filter.

    Raises:
        ValueError: If ``field_path`` is invalid.
        ValueError: If ``value`` is a NaN or :data:`None` and
            ``op_string`` is not ``==``.
    """
    field_path_module.split_field_path(field_path)  # raises on bad path
    field_ref = query_pb2.StructuredQuery.FieldReference(field_path=field_path)
    if value is None or _isnan(value):
        # None/NaN cannot be encoded as comparison values; the API models
        # them as unary IS_NULL / IS_NAN filters, equality-only.
        if op_string != _EQ_OP:
            raise ValueError(_BAD_OP_NAN_NULL)
        if value is None:
            unary_op = enums.StructuredQuery.UnaryFilter.Operator.IS_NULL
        else:
            unary_op = enums.StructuredQuery.UnaryFilter.Operator.IS_NAN
        filter_pb = query_pb2.StructuredQuery.UnaryFilter(
            field=field_ref, op=unary_op
        )
    elif isinstance(value, (transforms.Sentinel, transforms._ValueList)):
        raise ValueError(_INVALID_WHERE_TRANSFORM)
    else:
        filter_pb = query_pb2.StructuredQuery.FieldFilter(
            field=field_ref,
            op=_enum_from_op_string(op_string),
            value=_helpers.encode_value(value),
        )
    return self.__class__(
        self._parent,
        projection=self._projection,
        field_filters=self._field_filters + (filter_pb,),
        orders=self._orders,
        limit=self._limit,
        offset=self._offset,
        start_at=self._start_at,
        end_at=self._end_at,
    )
@staticmethod
def _make_order(field_path, direction):
"""Helper for :meth:`order_by`."""
return query_pb2.StructuredQuery.Order(
field=query_pb2.StructuredQuery.FieldReference(field_path=field_path),
direction=_enum_from_direction(direction),
)
def order_by(self, field_path, direction=ASCENDING):
"""Modify the query to add an order clause on a specific field.
See :meth:`~google.cloud.firestore_v1beta1.client.Client.field_path`
for more information on **field paths**.
Successive :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by` calls
will further refine the ordering of results returned by the query
(i.e. the new "order by" fields will be added to existing ones).
Args:
field_path (str): A field path (``.``-delimited list of
field names) on which to order the query results.
direction (Optional[str]): The direction to order by. Must be one
of :attr:`ASCENDING` or :attr:`DESCENDING`, defaults to
:attr:`ASCENDING`.
Returns:
~.firestore_v1beta1.query.Query: An ordered query. Acts as a
copy of the current query, modified with the newly added
"order by" constraint.
Raises:
ValueError: If ``field_path`` is invalid.
ValueError: If ``direction`` is not one of :attr:`ASCENDING` or
:attr:`DESCENDING`.
"""
field_path_module.split_field_path(field_path) # raises
order_pb = self._make_order(field_path, direction)
new_orders = self._orders + (order_pb,)
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=new_orders,
limit=self._limit,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
)
def limit(self, count):
"""Limit a query to return a fixed number of results.
If the current query already has a limit set, this will overwrite it.
Args:
count (int): Maximum number of documents to return that match
the query.
Returns:
~.firestore_v1beta1.query.Query: A limited query. Acts as a
copy of the current query, modified with the newly added
"limit" filter.
"""
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=self._orders,
limit=count,
offset=self._offset,
start_at=self._start_at,
end_at=self._end_at,
)
def offset(self, num_to_skip):
"""Skip to an offset in a query.
If the current query already has specified an offset, this will
overwrite it.
Args:
num_to_skip (int): The number of results to skip at the beginning
of query results. (Must be non-negative.)
Returns:
~.firestore_v1beta1.query.Query: An offset query. Acts as a
copy of the current query, modified with the newly added
"offset" field.
"""
return self.__class__(
self._parent,
projection=self._projection,
field_filters=self._field_filters,
orders=self._orders,
limit=self._limit,
offset=num_to_skip,
start_at=self._start_at,
end_at=self._end_at,
)
    def _cursor_helper(self, document_fields, before, start):
        """Set values to be used for a ``start_at`` or ``end_at`` cursor.

        The values will later be used in a query protobuf.

        When the query is sent to the server, the ``document_fields`` will
        be used in the order given by fields set by
        :meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`.

        Args:
            document_fields (Union[~.firestore_v1beta1.\
                document.DocumentSnapshot, dict, list, tuple]): a document
                snapshot or a dictionary/list/tuple of fields representing a
                query results cursor. A cursor is a collection of values that
                represent a position in a query result set.
            before (bool): Flag indicating if the document in
                ``document_fields`` should (:data:`False`) or
                shouldn't (:data:`True`) be included in the result set.
            start (Optional[bool]): determines if the cursor is a ``start_at``
                cursor (:data:`True`) or an ``end_at`` cursor (:data:`False`).

        Returns:
            ~.firestore_v1beta1.query.Query: A query with cursor. Acts as
            a copy of the current query, modified with the newly added
            cursor.

        Raises:
            ValueError: If a snapshot cursor comes from a collection other
                than this query's parent.
        """
        if isinstance(document_fields, tuple):
            # Normalize tuples to lists; later normalization mutates by index.
            document_fields = list(document_fields)
        elif isinstance(document_fields, document.DocumentSnapshot):
            # A snapshot cursor is only meaningful for this query's own
            # collection.
            if document_fields.reference._path[:-1] != self._parent._path:
                raise ValueError(
                    "Cannot use snapshot from another collection as a cursor."
                )
        else:
            # NOTE: We copy so that the caller can't modify after calling.
            document_fields = copy.deepcopy(document_fields)

        cursor_pair = document_fields, before
        query_kwargs = {
            "projection": self._projection,
            "field_filters": self._field_filters,
            "orders": self._orders,
            "limit": self._limit,
            "offset": self._offset,
        }
        # Only the cursor being set is replaced; the other endpoint is
        # carried over unchanged.
        if start:
            query_kwargs["start_at"] = cursor_pair
            query_kwargs["end_at"] = self._end_at
        else:
            query_kwargs["start_at"] = self._start_at
            query_kwargs["end_at"] = cursor_pair

        return self.__class__(self._parent, **query_kwargs)
def start_at(self, document_fields):
"""Start query results at a particular document value.
The result set will **include** the document specified by
``document_fields``.
If the current query already has specified a start cursor -- either
via this method or
:meth:`~google.cloud.firestore_v1beta1.query.Query.start_after` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"start at" cursor.
"""
return self._cursor_helper(document_fields, before=True, start=True)
def start_after(self, document_fields):
"""Start query results after a particular document value.
The result set will **exclude** the document specified by
``document_fields``.
If the current query already has specified a start cursor -- either
via this method or
:meth:`~google.cloud.firestore_v1beta1.query.Query.start_at` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"start after" cursor.
"""
return self._cursor_helper(document_fields, before=False, start=True)
def end_before(self, document_fields):
"""End query results before a particular document value.
The result set will **exclude** the document specified by
``document_fields``.
If the current query already has specified an end cursor -- either
via this method or
:meth:`~google.cloud.firestore_v1beta1.query.Query.end_at` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"end before" cursor.
"""
return self._cursor_helper(document_fields, before=True, start=False)
def end_at(self, document_fields):
"""End query results at a particular document value.
The result set will **include** the document specified by
``document_fields``.
If the current query already has specified an end cursor -- either
via this method or
:meth:`~google.cloud.firestore_v1beta1.query.Query.end_before` -- this will
overwrite it.
When the query is sent to the server, the ``document_fields`` will
be used in the order given by fields set by
:meth:`~google.cloud.firestore_v1beta1.query.Query.order_by`.
Args:
document_fields (Union[~.firestore_v1beta1.\
document.DocumentSnapshot, dict, list, tuple]): a document
snapshot or a dictionary/list/tuple of fields representing a
query results cursor. A cursor is a collection of values that
represent a position in a query result set.
Returns:
~.firestore_v1beta1.query.Query: A query with cursor. Acts as
a copy of the current query, modified with the newly added
"end at" cursor.
"""
return self._cursor_helper(document_fields, before=False, start=False)
def _filters_pb(self):
"""Convert all the filters into a single generic Filter protobuf.
This may be a lone field filter or unary filter, may be a composite
filter or may be :data:`None`.
Returns:
google.cloud.firestore_v1beta1.types.\
StructuredQuery.Filter: A "generic" filter representing the
current query's filters.
"""
num_filters = len(self._field_filters)
if num_filters == 0:
return None
elif num_filters == 1:
return _filter_pb(self._field_filters[0])
else:
composite_filter = query_pb2.StructuredQuery.CompositeFilter(
op=enums.StructuredQuery.CompositeFilter.Operator.AND,
filters=[_filter_pb(filter_) for filter_ in self._field_filters],
)
return query_pb2.StructuredQuery.Filter(composite_filter=composite_filter)
@staticmethod
def _normalize_projection(projection):
"""Helper: convert field paths to message."""
if projection is not None:
fields = list(projection.fields)
if not fields:
field_ref = query_pb2.StructuredQuery.FieldReference(
field_path="__name__"
)
return query_pb2.StructuredQuery.Projection(fields=[field_ref])
return projection
    def _normalize_orders(self):
        """Helper: adjust orders based on cursors, where clauses.

        When a cursor is a :class:`~.document.DocumentSnapshot`, implicit
        orderings are appended: one ascending order per inequality-filtered
        field that is not already ordered on, plus a final "__name__"
        tie-breaker.

        Returns:
            List[google.cloud.firestore_v1beta1.types.StructuredQuery.Order]:
            The orderings to send with the query.
        """
        orders = list(self._orders)
        _has_snapshot_cursor = False

        if self._start_at:
            if isinstance(self._start_at[0], document.DocumentSnapshot):
                _has_snapshot_cursor = True

        if self._end_at:
            if isinstance(self._end_at[0], document.DocumentSnapshot):
                _has_snapshot_cursor = True

        if _has_snapshot_cursor:
            # Inequality operators (everything except equality and
            # array_contains) require an explicit ordering on their field.
            should_order = [
                _enum_from_op_string(key)
                for key in _COMPARISON_OPERATORS
                if key not in (_EQ_OP, "array_contains")
            ]
            order_keys = [order.field.field_path for order in orders]
            for filter_ in self._field_filters:
                field = filter_.field.field_path
                if filter_.op in should_order and field not in order_keys:
                    orders.append(self._make_order(field, "ASCENDING"))
            if not orders:
                # No ordering at all: fall back to document name, ascending.
                orders.append(self._make_order("__name__", "ASCENDING"))
            else:
                # Add a "__name__" tie-breaker, reusing the direction of the
                # last existing ordering.
                order_keys = [order.field.field_path for order in orders]
                if "__name__" not in order_keys:
                    direction = orders[-1].direction  # enum?
                    orders.append(self._make_order("__name__", direction))

        return orders
    def _normalize_cursor(self, cursor, orders):
        """Helper: convert cursor to a list of values based on orders.

        Args:
            cursor (Optional[Tuple[Any, bool]]): Two-tuple of cursor data (a
                document snapshot, dict, or list of field values) and a
                ``before`` flag, or :data:`None` if no cursor is set.
            orders (List): Normalized "order by" entries (see
                :meth:`_normalize_orders`); cursor values are matched to
                these positionally.

        Returns:
            Optional[Tuple[list, bool]]: A list with one value per ordering,
            paired with the ``before`` flag, or :data:`None`.

        Raises:
            ValueError: If a cursor is given without any orderings, if an
                ordered-on field is missing from the cursor data, if the
                number of values does not match the orderings, or if any
                value is a transform sentinel.
        """
        if cursor is None:
            return

        if not orders:
            raise ValueError(_NO_ORDERS_FOR_CURSOR)

        document_fields, before = cursor

        order_keys = [order.field.field_path for order in orders]

        if isinstance(document_fields, document.DocumentSnapshot):
            # Flatten a snapshot into a plain dict; the document reference
            # itself is exposed under the reserved "__name__" key.
            snapshot = document_fields
            document_fields = snapshot.to_dict()
            document_fields["__name__"] = snapshot.reference

        if isinstance(document_fields, dict):
            # Transform to list using orders
            values = []
            data = document_fields
            for order_key in order_keys:
                try:
                    values.append(field_path_module.get_nested_value(order_key, data))
                except KeyError:
                    msg = _MISSING_ORDER_BY.format(order_key, data)
                    raise ValueError(msg)
            document_fields = values

        if len(document_fields) != len(orders):
            msg = _MISMATCH_CURSOR_W_ORDER_BY.format(document_fields, order_keys)
            raise ValueError(msg)

        _transform_bases = (transforms.Sentinel, transforms._ValueList)

        for index, key_field in enumerate(zip(order_keys, document_fields)):
            key, field = key_field

            if isinstance(field, _transform_bases):
                # Sentinels like DELETE_FIELD / SERVER_TIMESTAMP are write
                # transforms and cannot serve as cursor values.
                msg = _INVALID_CURSOR_TRANSFORM
                raise ValueError(msg)

            # Accept a bare document ID string for "__name__" and resolve it
            # to a reference in this query's parent collection.
            if key == "__name__" and isinstance(field, six.string_types):
                document_fields[index] = self._parent.document(field)

        return document_fields, before
def _to_protobuf(self):
"""Convert the current query into the equivalent protobuf.
Returns:
google.cloud.firestore_v1beta1.types.StructuredQuery: The
query protobuf.
"""
projection = self._normalize_projection(self._projection)
orders = self._normalize_orders()
start_at = self._normalize_cursor(self._start_at, orders)
end_at = self._normalize_cursor(self._end_at, orders)
query_kwargs = {
"select": projection,
"from": [
query_pb2.StructuredQuery.CollectionSelector(
collection_id=self._parent.id
)
],
"where": self._filters_pb(),
"order_by": orders,
"start_at": _cursor_pb(start_at),
"end_at": _cursor_pb(end_at),
}
if self._offset is not None:
query_kwargs["offset"] = self._offset
if self._limit is not None:
query_kwargs["limit"] = wrappers_pb2.Int32Value(value=self._limit)
return query_pb2.StructuredQuery(**query_kwargs)
def get(self, transaction=None):
"""Deprecated alias for :meth:`stream`."""
warnings.warn(
"'Query.get' is deprecated: please use 'Query.stream' instead.",
DeprecationWarning,
stacklevel=2,
)
return self.stream(transaction=transaction)
    def stream(self, transaction=None):
        """Read the documents in the collection that match this query.

        This sends a ``RunQuery`` RPC and then returns an iterator which
        consumes each document returned in the stream of ``RunQueryResponse``
        messages.

        .. note::

            The underlying stream of responses will time out after
            the ``max_rpc_timeout_millis`` value set in the GAPIC
            client configuration for the ``RunQuery`` API. Snapshots
            not consumed from the iterator before that point will be lost.

        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).

        Args:
            transaction (Optional[~.firestore_v1beta1.transaction.\
                Transaction]): An existing transaction that this query will
                run in.

        Yields:
            ~.firestore_v1beta1.document.DocumentSnapshot: The next
            document that fulfills the query.
        """
        parent_path, expected_prefix = self._parent._parent_info()
        response_iterator = self._client._firestore_api.run_query(
            parent_path,
            self._to_protobuf(),
            transaction=_helpers.get_transaction_id(transaction),
            metadata=self._client._rpc_metadata,
        )

        for response in response_iterator:
            # Responses without a document payload produce no snapshot and
            # are skipped.
            snapshot = _query_response_to_snapshot(
                response, self._parent, expected_prefix
            )
            if snapshot is not None:
                yield snapshot
    def on_snapshot(self, callback):
        """Monitor the documents in this collection that match this query.

        This starts a watch on this query using a background thread. The
        provided callback is run on the snapshot of the documents.

        Args:
            callback(~.firestore.query.QuerySnapshot): a callback to run when
                a change occurs.

        Example:
            from google.cloud import firestore_v1beta1

            db = firestore_v1beta1.Client()
            query_ref = db.collection(u'users').where("user", "==", u'Ada')

            def on_snapshot(docs, changes, read_time):
                for doc in docs:
                    print(u'{} => {}'.format(doc.id, doc.to_dict()))

            # Watch this query
            query_watch = query_ref.on_snapshot(on_snapshot)

            # Terminate this watch
            query_watch.unsubscribe()
        """
        # Watch.for_query owns the background thread; the returned watch
        # object is the caller's handle for unsubscribing.
        return Watch.for_query(
            self, callback, document.DocumentSnapshot, document.DocumentReference
        )
def _comparator(self, doc1, doc2):
_orders = self._orders
# Add implicit sorting by name, using the last specified direction.
if len(_orders) == 0:
lastDirection = Query.ASCENDING
else:
if _orders[-1].direction == 1:
lastDirection = Query.ASCENDING
else:
lastDirection = Query.DESCENDING
orderBys = list(_orders)
order_pb = query_pb2.StructuredQuery.Order(
field=query_pb2.StructuredQuery.FieldReference(field_path="id"),
direction=_enum_from_direction(lastDirection),
)
orderBys.append(order_pb)
for orderBy in orderBys:
if orderBy.field.field_path == "id":
# If ordering by docuent id, compare resource paths.
comp = Order()._compare_to(doc1.reference._path, doc2.reference._path)
else:
if (
orderBy.field.field_path not in doc1._data
or orderBy.field.field_path not in doc2._data
):
raise ValueError(
"Can only compare fields that exist in the "
"DocumentSnapshot. Please include the fields you are "
"ordering on in your select() call."
)
v1 = doc1._data[orderBy.field.field_path]
v2 = doc2._data[orderBy.field.field_path]
encoded_v1 = _helpers.encode_value(v1)
encoded_v2 = _helpers.encode_value(v2)
comp = Order().compare(encoded_v1, encoded_v2)
if comp != 0:
# 1 == Ascending, -1 == Descending
return orderBy.direction * comp
return 0
def _enum_from_op_string(op_string):
    """Convert a string representation of a binary operator to an enum.

    These enums come from the protobuf message definition
    ``StructuredQuery.FieldFilter.Operator``.

    Args:
        op_string (str): A comparison operation in the form of a string.
            Acceptable values are ``<``, ``<=``, ``==``, ``>=``
            and ``>``.

    Returns:
        int: The enum corresponding to ``op_string``.

    Raises:
        ValueError: If ``op_string`` is not a valid operator.
    """
    if op_string in _COMPARISON_OPERATORS:
        return _COMPARISON_OPERATORS[op_string]

    choices = ", ".join(sorted(_COMPARISON_OPERATORS.keys()))
    raise ValueError(_BAD_OP_STRING.format(op_string, choices))
def _isnan(value):
"""Check if a value is NaN.
This differs from ``math.isnan`` in that **any** input type is
allowed.
Args:
value (Any): A value to check for NaN-ness.
Returns:
bool: Indicates if the value is the NaN float.
"""
if isinstance(value, float):
return math.isnan(value)
else:
return False
def _enum_from_direction(direction):
    """Convert a string representation of a direction to an enum.

    Args:
        direction (str): A direction to order by. Must be one of
            :attr:`~google.cloud.firestore.Query.ASCENDING` or
            :attr:`~google.cloud.firestore.Query.DESCENDING`. An integer
            is assumed to already be an enum value and is passed through.

    Returns:
        int: The enum corresponding to ``direction``.

    Raises:
        ValueError: If ``direction`` is not a valid direction.
    """
    # Already an enum value: pass through untouched.
    if isinstance(direction, int):
        return direction

    if direction == Query.ASCENDING:
        return enums.StructuredQuery.Direction.ASCENDING
    if direction == Query.DESCENDING:
        return enums.StructuredQuery.Direction.DESCENDING

    raise ValueError(
        _BAD_DIR_STRING.format(direction, Query.ASCENDING, Query.DESCENDING)
    )
def _filter_pb(field_or_unary):
    """Convert a specific protobuf filter to the generic filter type.

    Args:
        field_or_unary (Union[google.cloud.proto.firestore.v1beta1.\
            query_pb2.StructuredQuery.FieldFilter, google.cloud.proto.\
            firestore.v1beta1.query_pb2.StructuredQuery.UnaryFilter]): A
            field or unary filter to convert to a generic filter.

    Returns:
        google.cloud.firestore_v1beta1.types.\
        StructuredQuery.Filter: A "generic" filter.

    Raises:
        ValueError: If ``field_or_unary`` is not a field or unary filter.
    """
    if isinstance(field_or_unary, query_pb2.StructuredQuery.FieldFilter):
        return query_pb2.StructuredQuery.Filter(field_filter=field_or_unary)

    if isinstance(field_or_unary, query_pb2.StructuredQuery.UnaryFilter):
        return query_pb2.StructuredQuery.Filter(unary_filter=field_or_unary)

    raise ValueError("Unexpected filter type", type(field_or_unary), field_or_unary)
def _cursor_pb(cursor_pair):
    """Convert a cursor pair to a protobuf.

    If ``cursor_pair`` is :data:`None`, just returns :data:`None`.

    Args:
        cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of

            * a list of field values.
            * a ``before`` flag

    Returns:
        Optional[google.cloud.firestore_v1beta1.types.Cursor]: A
        protobuf cursor corresponding to the values.
    """
    if cursor_pair is None:
        return None

    data, before = cursor_pair
    value_pbs = [_helpers.encode_value(value) for value in data]
    return query_pb2.Cursor(values=value_pbs, before=before)
def _query_response_to_snapshot(response_pb, collection, expected_prefix):
    """Parse a query response protobuf to a document snapshot.

    Args:
        response_pb (google.cloud.proto.firestore.v1beta1.\
            firestore_pb2.RunQueryResponse): A streamed response message
            from the ``RunQuery`` RPC.
        collection (~.firestore_v1beta1.collection.CollectionReference): A
            reference to the collection that initiated the query.
        expected_prefix (str): The expected prefix for fully-qualified
            document names returned in the query results. This can be computed
            directly from ``collection`` via :meth:`_parent_info`.

    Returns:
        Optional[~.firestore.document.DocumentSnapshot]: A
        snapshot of the data returned in the query. If ``response_pb.document``
        is not set, the snapshot will be :data:`None`.
    """
    if not response_pb.HasField("document"):
        # Responses without a document payload (e.g. progress messages)
        # yield no snapshot.
        return None

    document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)
    reference = collection.document(document_id)
    data = _helpers.decode_dict(response_pb.document.fields, collection._client)
    snapshot = document.DocumentSnapshot(
        reference,
        data,
        exists=True,
        read_time=response_pb.read_time,
        create_time=response_pb.document.create_time,
        update_time=response_pb.document.update_time,
    )
    return snapshot

View file

@ -0,0 +1,409 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for applying Google Cloud Firestore changes in a transaction."""
import random
import time
import six
from google.api_core import exceptions
from google.cloud.firestore_v1beta1 import batch
from google.cloud.firestore_v1beta1 import types
MAX_ATTEMPTS = 5
"""int: Default number of transaction attempts (with retries)."""

# Error-message templates raised by Transaction state checks.
_CANT_BEGIN = "The transaction has already begun. Current transaction ID: {!r}."
_MISSING_ID_TEMPLATE = "The transaction has no transaction ID, so it cannot be {}."
_CANT_ROLLBACK = _MISSING_ID_TEMPLATE.format("rolled back")
_CANT_COMMIT = _MISSING_ID_TEMPLATE.format("committed")
_WRITE_READ_ONLY = "Cannot perform write operation in read-only transaction."

_INITIAL_SLEEP = 1.0
"""float: Initial "max" for sleep interval. To be used in :func:`_sleep`."""
_MAX_SLEEP = 30.0
"""float: Eventual "max" sleep time. To be used in :func:`_sleep`."""
_MULTIPLIER = 2.0
"""float: Multiplier for exponential backoff. To be used in :func:`_sleep`."""

# Raised when the retry budget (max_attempts) is exhausted.
_EXCEED_ATTEMPTS_TEMPLATE = "Failed to commit transaction in {:d} attempts."
# Raised when a retry ID is supplied for a read-only transaction.
_CANT_RETRY_READ_ONLY = "Only read-write transactions can be retried."
class Transaction(batch.WriteBatch):
    """Accumulate read-and-write operations to be sent in a transaction.

    Args:
        client (~.firestore_v1beta1.client.Client): The client that
            created this transaction.
        max_attempts (Optional[int]): The maximum number of attempts for
            the transaction (i.e. allowing retries). Defaults to
            :attr:`~google.cloud.firestore_v1beta1.transaction.MAX_ATTEMPTS`.
        read_only (Optional[bool]): Flag indicating if the transaction
            should be read-only or should allow writes. Defaults to
            :data:`False`.
    """

    def __init__(self, client, max_attempts=MAX_ATTEMPTS, read_only=False):
        super(Transaction, self).__init__(client)
        self._max_attempts = max_attempts
        self._read_only = read_only
        # Server-assigned transaction ID; set by _begin(), cleared by
        # _clean_up(). None means "not in progress".
        self._id = None

    def _add_write_pbs(self, write_pbs):
        """Add ``Write`` protobufs to this transaction.

        Args:
            write_pbs (List[google.cloud.proto.firestore.v1beta1.\
                write_pb2.Write]): A list of write protobufs to be added.

        Raises:
            ValueError: If this transaction is read-only.
        """
        if self._read_only:
            raise ValueError(_WRITE_READ_ONLY)

        super(Transaction, self)._add_write_pbs(write_pbs)

    def _options_protobuf(self, retry_id):
        """Convert the current object to protobuf.

        The ``retry_id`` value is used when retrying a transaction that
        failed (e.g. due to contention). It is intended to be the "first"
        transaction that failed (i.e. if multiple retries are needed).

        Args:
            retry_id (Union[bytes, NoneType]): Transaction ID of a transaction
                to be retried.

        Returns:
            Optional[google.cloud.firestore_v1beta1.types.TransactionOptions]:
            The protobuf ``TransactionOptions`` if ``read_only==True`` or if
            there is a transaction ID to be retried, else :data:`None`.

        Raises:
            ValueError: If ``retry_id`` is not :data:`None` but the
                transaction is read-only.
        """
        if retry_id is not None:
            # Retrying only makes sense for read-write transactions.
            if self._read_only:
                raise ValueError(_CANT_RETRY_READ_ONLY)

            return types.TransactionOptions(
                read_write=types.TransactionOptions.ReadWrite(
                    retry_transaction=retry_id
                )
            )
        elif self._read_only:
            return types.TransactionOptions(
                read_only=types.TransactionOptions.ReadOnly()
            )
        else:
            # Plain read-write transaction: the server default, no options.
            return None

    @property
    def in_progress(self):
        """Determine if this transaction has already begun.

        Returns:
            bool: Indicates if the transaction has started.
        """
        return self._id is not None

    @property
    def id(self):
        """Get the current transaction ID.

        Returns:
            Optional[bytes]: The transaction ID (or :data:`None` if the
            current transaction is not in progress).
        """
        return self._id

    def _begin(self, retry_id=None):
        """Begin the transaction.

        Args:
            retry_id (Optional[bytes]): Transaction ID of a transaction to be
                retried.

        Raises:
            ValueError: If the current transaction has already begun.
        """
        if self.in_progress:
            msg = _CANT_BEGIN.format(self._id)
            raise ValueError(msg)

        transaction_response = self._client._firestore_api.begin_transaction(
            self._client._database_string,
            options_=self._options_protobuf(retry_id),
            metadata=self._client._rpc_metadata,
        )
        self._id = transaction_response.transaction

    def _clean_up(self):
        """Clean up the instance after :meth:`_rollback` or :meth:`_commit`.

        This intended to occur on success or failure of the associated RPCs.
        """
        self._write_pbs = []
        self._id = None

    def _rollback(self):
        """Roll back the transaction.

        Raises:
            ValueError: If no transaction is in progress.
        """
        if not self.in_progress:
            raise ValueError(_CANT_ROLLBACK)

        try:
            # NOTE: The response is just ``google.protobuf.Empty``.
            self._client._firestore_api.rollback(
                self._client._database_string,
                self._id,
                metadata=self._client._rpc_metadata,
            )
        finally:
            # Reset local state even if the rollback RPC itself fails.
            self._clean_up()

    def _commit(self):
        """Transactionally commit the changes accumulated.

        Returns:
            List[google.cloud.proto.firestore.v1beta1.\
                write_pb2.WriteResult, ...]: The write results corresponding
            to the changes committed, returned in the same order as the
            changes were applied to this transaction. A write result contains
            an ``update_time`` field.

        Raises:
            ValueError: If no transaction is in progress.
        """
        if not self.in_progress:
            raise ValueError(_CANT_COMMIT)

        commit_response = _commit_with_retry(self._client, self._write_pbs, self._id)

        self._clean_up()
        return list(commit_response.write_results)
class _Transactional(object):
    """Provide a callable object to use as a transactional decorator.

    This is surfaced via
    :func:`~google.cloud.firestore_v1beta1.transaction.transactional`.

    Args:
        to_wrap (Callable[~.firestore_v1beta1.transaction.Transaction, \
            Any]): A callable that should be run (and retried) in a
            transaction.
    """

    def __init__(self, to_wrap):
        self.to_wrap = to_wrap
        self.current_id = None
        """Optional[bytes]: The current transaction ID."""
        self.retry_id = None
        """Optional[bytes]: The ID of the first attempted transaction."""

    def _reset(self):
        """Unset the transaction IDs."""
        self.current_id = None
        self.retry_id = None

    def _pre_commit(self, transaction, *args, **kwargs):
        """Begin transaction and call the wrapped callable.

        If the callable raises an exception, the transaction will be rolled
        back. If not, the transaction will be "ready" for ``Commit`` (i.e.
        it will have staged writes).

        Args:
            transaction (~.firestore_v1beta1.transaction.Transaction): A
                transaction to execute the callable within.
            args (Tuple[Any, ...]): The extra positional arguments to pass
                along to the wrapped callable.
            kwargs (Dict[str, Any]): The extra keyword arguments to pass
                along to the wrapped callable.

        Returns:
            Any: result of the wrapped callable.

        Raises:
            Exception: Any failure caused by ``to_wrap``.
        """
        # Force the ``transaction`` to be not "in progress".
        transaction._clean_up()
        transaction._begin(retry_id=self.retry_id)

        # Update the stored transaction IDs.
        self.current_id = transaction._id
        if self.retry_id is None:
            # First attempt: remember this ID so later retries can preserve
            # the transaction's "spot in line" on the server.
            self.retry_id = self.current_id
        try:
            return self.to_wrap(transaction, *args, **kwargs)
        except:  # noqa
            # NOTE: If ``rollback`` fails this will lose the information
            #       from the original failure.
            transaction._rollback()
            raise

    def _maybe_commit(self, transaction):
        """Try to commit the transaction.

        If the transaction is read-write and the ``Commit`` fails with the
        ``ABORTED`` status code, it will be retried. Any other failure will
        not be caught.

        Args:
            transaction (~.firestore_v1beta1.transaction.Transaction): The
                transaction to be ``Commit``-ed.

        Returns:
            bool: Indicating if the commit succeeded.
        """
        try:
            transaction._commit()
            return True
        except exceptions.GoogleAPICallError as exc:
            if transaction._read_only:
                # Read-only transactions are never retried (see
                # _CANT_RETRY_READ_ONLY); surface the failure immediately.
                raise

            if isinstance(exc, exceptions.Aborted):
                # If a read-write transaction returns ABORTED, retry.
                return False
            else:
                raise

    def __call__(self, transaction, *args, **kwargs):
        """Execute the wrapped callable within a transaction.

        Args:
            transaction (~.firestore_v1beta1.transaction.Transaction): A
                transaction to execute the callable within.
            args (Tuple[Any, ...]): The extra positional arguments to pass
                along to the wrapped callable.
            kwargs (Dict[str, Any]): The extra keyword arguments to pass
                along to the wrapped callable.

        Returns:
            Any: The result of the wrapped callable.

        Raises:
            ValueError: If the transaction does not succeed in
                ``max_attempts``.
        """
        self._reset()

        for attempt in six.moves.xrange(transaction._max_attempts):
            result = self._pre_commit(transaction, *args, **kwargs)
            succeeded = self._maybe_commit(transaction)
            if succeeded:
                return result

            # Subsequent requests will use the failed transaction ID as part of
            # the ``BeginTransactionRequest`` when restarting this transaction
            # (via ``options.retry_transaction``). This preserves the "spot in
            # line" of the transaction, so exponential backoff is not required
            # in this case.

        # Retry budget exhausted: roll back the last attempt and fail.
        transaction._rollback()
        msg = _EXCEED_ATTEMPTS_TEMPLATE.format(transaction._max_attempts)
        raise ValueError(msg)
def transactional(to_wrap):
    """Decorate a callable so that it runs in a transaction.

    Args:
        to_wrap (Callable[~.firestore_v1beta1.transaction.Transaction, \
            Any]): A callable that should be run (and retried) in a
            transaction.

    Returns:
        Callable[~.firestore_v1beta1.transaction.Transaction, Any]: the
        wrapped callable.
    """
    wrapper = _Transactional(to_wrap)
    return wrapper
def _commit_with_retry(client, write_pbs, transaction_id):
    """Call ``Commit`` on the GAPIC client with retry / sleep.

    Retries the ``Commit`` RPC on Unavailable. Usually this RPC-level
    retry is handled by the underlying GAPICd client, but in this case it
    doesn't because ``Commit`` is not always idempotent. But here we know it
    is "idempotent"-like because it has a transaction ID.

    NOTE(review): an earlier description also mentioned special-casing the
    ``INVALID_ARGUMENT`` error, but only ``ServiceUnavailable`` is handled
    below — confirm whether additional handling was intended.

    Args:
        client (~.firestore_v1beta1.client.Client): A client with
            GAPIC client and configuration details.
        write_pbs (List[google.cloud.proto.firestore.v1beta1.\
            write_pb2.Write, ...]): A ``Write`` protobuf instance to
            be committed.
        transaction_id (bytes): ID of an existing transaction that
            this commit will run in.

    Returns:
        google.cloud.firestore_v1beta1.types.CommitResponse:
        The protobuf response from ``Commit``.

    Raises:
        ~google.api_core.exceptions.GoogleAPICallError: If a non-retryable
            exception is encountered.
    """
    current_sleep = _INITIAL_SLEEP
    while True:
        try:
            return client._firestore_api.commit(
                client._database_string,
                write_pbs,
                transaction=transaction_id,
                metadata=client._rpc_metadata,
            )
        except exceptions.ServiceUnavailable:
            # Retry
            pass

        # Full-jitter exponential backoff before the next attempt.
        current_sleep = _sleep(current_sleep)
def _sleep(current_sleep, max_sleep=_MAX_SLEEP, multiplier=_MULTIPLIER):
    """Sleep a jittered amount, then return the next backoff ceiling.

    A duration is drawn uniformly from ``[0, current_sleep]``. It might
    seem counterintuitive to use that much jitter, but
    `Exponential Backoff And Jitter`_ argues that "full jitter" is the
    best strategy.

    .. _Exponential Backoff And Jitter: https://www.awsarchitectureblog.com/\
        2015/03/backoff.html

    Args:
        current_sleep (float): The current "max" for sleep interval.
        max_sleep (Optional[float]): Eventual "max" sleep time
        multiplier (Optional[float]): Multiplier for exponential backoff.

    Returns:
        float: Newly doubled ``current_sleep`` or ``max_sleep`` (whichever
        is smaller)
    """
    jittered = random.uniform(0.0, current_sleep)
    time.sleep(jittered)
    return min(current_sleep * multiplier, max_sleep)

View file

@ -0,0 +1,90 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpful constants to use for Google Cloud Firestore."""
class Sentinel(object):
    """Sentinel objects used to signal special handling."""

    __slots__ = ("description",)

    def __init__(self, description):
        # Human-readable explanation of what the sentinel stands for.
        self.description = description

    def __repr__(self):
        return "Sentinel: %s" % (self.description,)
# Sentinel written into a document to mark a field for deletion.
DELETE_FIELD = Sentinel("Value used to delete a field in a document.")
# Sentinel that asks the backend to write its own timestamp into the field.
SERVER_TIMESTAMP = Sentinel(
    "Value used to set a document field to the server timestamp."
)
class _ValueList(object):
"""Read-only list of values.
Args:
values (List | Tuple): values held in the helper.
"""
slots = ("_values",)
def __init__(self, values):
if not isinstance(values, (list, tuple)):
raise ValueError("'values' must be a list or tuple.")
if len(values) == 0:
raise ValueError("'values' must be non-empty.")
self._values = list(values)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._values == other._values
@property
def values(self):
"""Values to append.
Returns (List):
values to be appended by the transform.
"""
return self._values
class ArrayUnion(_ValueList):
    """Field transform: appends missing values to an array field.

    See:
    https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1beta1#google.firestore.v1beta1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1beta1.ArrayValue.google.firestore.v1beta1.DocumentTransform.FieldTransform.append_missing_elements

    Args:
        values (List | Tuple): values to append.
    """
class ArrayRemove(_ValueList):
    """Field transform: remove values from an array field.

    See:
    https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1beta1#google.firestore.v1beta1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1beta1.ArrayValue.google.firestore.v1beta1.DocumentTransform.FieldTransform.remove_all_from_array

    Args:
        values (List | Tuple): values to remove.
    """

View file

@ -0,0 +1,63 @@
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.protobuf import wrappers_pb2
from google.rpc import status_pb2
from google.type import latlng_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.firestore_v1beta1.proto import common_pb2
from google.cloud.firestore_v1beta1.proto import document_pb2
from google.cloud.firestore_v1beta1.proto import firestore_pb2
from google.cloud.firestore_v1beta1.proto import query_pb2
from google.cloud.firestore_v1beta1.proto import write_pb2
# Protobuf modules shared across Google Cloud APIs; their message classes
# are re-exported from this module unchanged.
_shared_modules = [
    http_pb2,
    any_pb2,
    descriptor_pb2,
    empty_pb2,
    struct_pb2,
    timestamp_pb2,
    wrappers_pb2,
    status_pb2,
    latlng_pb2,
]
# Firestore-specific protobuf modules.
_local_modules = [common_pb2, document_pb2, firestore_pb2, query_pb2, write_pb2]
# Names exported by this module, accumulated below for ``__all__``.
names = []
# Hoist every message class from the shared modules to this module's
# top level.
for module in _shared_modules:
    for name, message in get_messages(module).items():
        setattr(sys.modules[__name__], name, message)
        names.append(name)
# Same for the Firestore-local modules, but also rebrand ``__module__`` so
# documentation tooling points at this package instead of the raw protos.
for module in _local_modules:
    for name, message in get_messages(module).items():
        message.__module__ = "google.cloud.firestore_v1beta1.types"
        setattr(sys.modules[__name__], name, message)
        names.append(name)
__all__ = tuple(sorted(names))

View file

@ -0,0 +1,722 @@
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import collections
import threading
import datetime
from enum import Enum
import functools
import pytz
from google.api_core.bidi import ResumableBidiRpc
from google.api_core.bidi import BackgroundConsumer
from google.cloud.firestore_v1beta1.proto import firestore_pb2
from google.cloud.firestore_v1beta1 import _helpers
from google.api_core import exceptions
import grpc
"""Python client for Google Cloud Firestore Watch."""
_LOGGER = logging.getLogger(__name__)
WATCH_TARGET_ID = 0x5079 # "Py"
GRPC_STATUS_CODE = {
"OK": 0,
"CANCELLED": 1,
"UNKNOWN": 2,
"INVALID_ARGUMENT": 3,
"DEADLINE_EXCEEDED": 4,
"NOT_FOUND": 5,
"ALREADY_EXISTS": 6,
"PERMISSION_DENIED": 7,
"UNAUTHENTICATED": 16,
"RESOURCE_EXHAUSTED": 8,
"FAILED_PRECONDITION": 9,
"ABORTED": 10,
"OUT_OF_RANGE": 11,
"UNIMPLEMENTED": 12,
"INTERNAL": 13,
"UNAVAILABLE": 14,
"DATA_LOSS": 15,
"DO_NOT_USE": -1,
}
_RPC_ERROR_THREAD_NAME = "Thread-OnRpcTerminated"
_RETRYABLE_STREAM_ERRORS = (
exceptions.DeadlineExceeded,
exceptions.ServiceUnavailable,
exceptions.InternalServerError,
exceptions.Unknown,
exceptions.GatewayTimeout,
)
DocTreeEntry = collections.namedtuple("DocTreeEntry", ["value", "index"])
class WatchDocTree(object):
    """Persistent-style mapping of snapshots to ``DocTreeEntry`` records.

    ``insert`` and ``remove`` leave the receiver untouched and hand back a
    modified copy, mimicking an immutable sorted tree.

    TODO: Currently backed by a plain dict; other implementations use an
    rbtree. Investigate performance and switch the underlying structure to
    an rbtree if needed.
    """

    def __init__(self):
        self._dict = {}
        self._index = 0

    def _copy(self):
        # Shallow-copy the mapping and carry the insertion counter over.
        clone = WatchDocTree()
        clone._dict = dict(self._dict)
        clone._index = self._index
        return clone

    def insert(self, key, value):
        # Return a copy with ``key`` added; the entry remembers the
        # monotonically increasing insertion index.
        clone = self._copy()
        clone._dict[key] = DocTreeEntry(value, clone._index)
        clone._index += 1
        return clone

    def remove(self, key):
        # Return a copy with ``key`` deleted.
        clone = self._copy()
        del clone._dict[key]
        return clone

    def find(self, key):
        return self._dict[key]

    def keys(self):
        return list(self._dict)

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __contains__(self, k):
        return k in self._dict
class ChangeType(Enum):
    """Kind of change a document underwent within a watch snapshot."""

    ADDED = 1
    REMOVED = 2
    MODIFIED = 3
class DocumentChange(object):
    """A single document change within a watch snapshot.

    Args:
        type (ChangeType): kind of change (added / modified / removed).
        document (document.DocumentSnapshot): the affected document.
        old_index (int): position before the change (-1 when added).
        new_index (int): position after the change (-1 when removed).
    """

    # TODO: spec indicated an isEqual param also
    def __init__(self, type, document, old_index, new_index):
        self.type, self.document = type, document
        self.old_index, self.new_index = old_index, new_index
class WatchResult(object):
    """Record tying a snapshot to its document name and change type."""

    def __init__(self, snapshot, name, change_type):
        self.change_type = change_type
        self.name = name
        self.snapshot = snapshot
def _maybe_wrap_exception(exception):
    """Map a raw gRPC error onto the google-api-core exception hierarchy.

    Anything that is not a ``grpc.RpcError`` passes through untouched.
    """
    if not isinstance(exception, grpc.RpcError):
        return exception
    return exceptions.from_grpc_error(exception)
def document_watch_comparator(doc1, doc2):
    """Comparator used by single-document watches.

    A document watch only ever tracks one document, so any two snapshots
    compare equal; the assert guards that assumption.
    """
    assert doc1 == doc2, "Document watches only support one document."
    return 0
class Watch(object):
    # Class-level aliases so unit tests can substitute fakes
    # ("FBO" = "for the benefit of").
    BackgroundConsumer = BackgroundConsumer  # FBO unit tests
    ResumableBidiRpc = ResumableBidiRpc  # FBO unit tests
    def __init__(
        self,
        document_reference,
        firestore,
        target,
        comparator,
        snapshot_callback,
        document_snapshot_cls,
        document_reference_cls,
        BackgroundConsumer=None,  # FBO unit testing
        ResumableBidiRpc=None,  # FBO unit testing
    ):
        """
        Args:
            document_reference: the document (or query) being watched.
            firestore: client used to reach the Firestore API.
            target: ``Target`` mapping passed to the ``Listen`` request.
            comparator: ordering function for document snapshots.
            snapshot_callback: Callback method to process snapshots.
                Args:
                    docs (List(DocumentSnapshot)): A callback that returns the
                        ordered list of documents stored in this snapshot.
                    changes (List(str)): A callback that returns the list of
                        changed documents since the last snapshot delivered
                        for this watch.
                    read_time (string): The ISO 8601 time at which this
                        snapshot was obtained.
            document_snapshot_cls: instance of DocumentSnapshot
            document_reference_cls: instance of DocumentReference
            BackgroundConsumer: test-only override for the consumer class.
            ResumableBidiRpc: test-only override for the RPC class.
        """
        self._document_reference = document_reference
        self._firestore = firestore
        self._api = firestore._firestore_api
        self._targets = target
        self._comparator = comparator
        self.DocumentSnapshot = document_snapshot_cls
        self.DocumentReference = document_reference_cls
        self._snapshot_callback = snapshot_callback
        # Serializes close() so shutdown runs at most once.
        self._closing = threading.Lock()
        self._closed = False

        def should_recover(exc):  # pragma: NO COVER
            # Only transparently resume the stream on UNAVAILABLE.
            return (
                isinstance(exc, grpc.RpcError)
                and exc.code() == grpc.StatusCode.UNAVAILABLE
            )

        initial_request = firestore_pb2.ListenRequest(
            database=self._firestore._database_string, add_target=self._targets
        )
        if ResumableBidiRpc is None:
            ResumableBidiRpc = self.ResumableBidiRpc  # FBO unit tests
        self._rpc = ResumableBidiRpc(
            self._api.transport.listen,
            initial_request=initial_request,
            should_recover=should_recover,
            metadata=self._firestore._rpc_metadata,
        )
        self._rpc.add_done_callback(self._on_rpc_done)
        # Initialize state for on_snapshot
        # The sorted tree of QueryDocumentSnapshots as sent in the last
        # snapshot. We only look at the keys.
        self.doc_tree = WatchDocTree()
        # A map of document names to QueryDocumentSnapshots for the last sent
        # snapshot.
        self.doc_map = {}
        # The accumulated map of document changes (keyed by document name) for
        # the current snapshot.
        self.change_map = {}
        # The current state of the query results.
        self.current = False
        # We need this to track whether we've pushed an initial set of changes,
        # since we should push those even when there are no changes, if there
        # aren't docs.
        self.has_pushed = False
        # The server assigns and updates the resume token.
        self.resume_token = None
        if BackgroundConsumer is None:  # FBO unit tests
            BackgroundConsumer = self.BackgroundConsumer
        # Spawns a background thread that feeds stream messages to
        # on_snapshot.
        self._consumer = BackgroundConsumer(self._rpc, self.on_snapshot)
        self._consumer.start()
    @property
    def is_active(self):
        """bool: True if this manager is actively streaming.

        Note that ``False`` does not indicate this is complete shut down,
        just that it stopped getting new messages.
        """
        return self._consumer is not None and self._consumer.is_active
    def close(self, reason=None):
        """Stop consuming messages and shutdown all helper threads.

        This method is idempotent. Additional calls will have no effect.

        Args:
            reason (Any): The reason to close this. If None, this is
                considered an "intentional" shutdown.

        Raises:
            Exception: ``reason`` itself, when it is an exception instance.
            RuntimeError: wrapping ``reason`` when it is any other truthy
                value.
        """
        with self._closing:
            if self._closed:
                return
            # Stop consuming messages.
            if self.is_active:
                _LOGGER.debug("Stopping consumer.")
                self._consumer.stop()
            self._consumer = None
            self._rpc.close()
            self._rpc = None
            self._closed = True
            _LOGGER.debug("Finished stopping manager.")
            if reason:
                # Raise an exception if a reason is provided
                _LOGGER.debug("reason for closing: %s" % reason)
                if isinstance(reason, Exception):
                    raise reason
                raise RuntimeError(reason)
    def _on_rpc_done(self, future):
        """Triggered whenever the underlying RPC terminates without recovery.

        This is typically triggered from one of two threads: the background
        consumer thread (when calling ``recv()`` produces a non-recoverable
        error) or the grpc management thread (when cancelling the RPC).

        This method is *non-blocking*. It will start another thread to deal
        with shutting everything down. This is to prevent blocking in the
        background consumer and preventing it from being ``joined()``.
        """
        _LOGGER.info("RPC termination has signaled manager shutdown.")
        # Normalize raw gRPC errors into google-api-core exceptions before
        # handing them to close() as the shutdown reason.
        future = _maybe_wrap_exception(future)
        thread = threading.Thread(
            name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future}
        )
        thread.daemon = True
        thread.start()
    def unsubscribe(self):
        """Stop this watch; alias for :meth:`close`."""
        self.close()
    @classmethod
    def for_document(
        cls,
        document_ref,
        snapshot_callback,
        snapshot_class_instance,
        reference_class_instance,
    ):
        """
        Creates a watch snapshot listener for a document. snapshot_callback
        receives a DocumentChange object, but may also start to get
        targetChange and such soon.

        Args:
            document_ref: Reference to Document
            snapshot_callback: callback to be called on snapshot
            snapshot_class_instance: instance of DocumentSnapshot to make
                snapshots with to pass to snapshot_callback
            reference_class_instance: instance of DocumentReference to make
                references
        """
        return cls(
            document_ref,
            document_ref._client,
            # Listen target describing the single watched document.
            {
                "documents": {"documents": [document_ref._document_path]},
                "target_id": WATCH_TARGET_ID,
            },
            document_watch_comparator,
            snapshot_callback,
            snapshot_class_instance,
            reference_class_instance,
        )
    @classmethod
    def for_query(
        cls, query, snapshot_callback, snapshot_class_instance, reference_class_instance
    ):
        """Creates a watch snapshot listener for a query.

        Args:
            query: the query whose results should be watched.
            snapshot_callback: callback to be called on snapshot
            snapshot_class_instance: instance of DocumentSnapshot to make
                snapshots with to pass to snapshot_callback
            reference_class_instance: instance of DocumentReference to make
                references
        """
        query_target = firestore_pb2.Target.QueryTarget(
            parent=query._client._database_string, structured_query=query._to_protobuf()
        )
        return cls(
            query,
            query._client,
            {"query": query_target, "target_id": WATCH_TARGET_ID},
            query._comparator,
            snapshot_callback,
            snapshot_class_instance,
            reference_class_instance,
        )
    def _on_snapshot_target_change_no_change(self, proto):
        """Handle a NO_CHANGE target-change message from the stream."""
        _LOGGER.debug("on_snapshot: target change: NO_CHANGE")
        change = proto.target_change
        no_target_ids = change.target_ids is None or len(change.target_ids) == 0
        if no_target_ids and change.read_time and self.current:
            # TargetChange.CURRENT followed by TargetChange.NO_CHANGE
            # signals a consistent state. Invoke the onSnapshot
            # callback as specified by the user.
            self.push(change.read_time, change.resume_token)
    def _on_snapshot_target_change_add(self, proto):
        """Handle an ADD target-change message; only our own target ID is
        ever expected."""
        _LOGGER.debug("on_snapshot: target change: ADD")
        target_id = proto.target_change.target_ids[0]
        if target_id != WATCH_TARGET_ID:
            raise RuntimeError("Unexpected target ID %s sent by server" % target_id)
    def _on_snapshot_target_change_remove(self, proto):
        """Handle a REMOVE target-change message by raising the server-sent
        cause as a RuntimeError."""
        _LOGGER.debug("on_snapshot: target change: REMOVE")
        change = proto.target_change
        # Default to code 13 ("internal error") when no cause is given.
        code = 13
        message = "internal error"
        if change.cause:
            code = change.cause.code
            message = change.cause.message
        message = "Error %s: %s" % (code, message)
        raise RuntimeError(message)
    def _on_snapshot_target_change_reset(self, proto):
        """Handle a RESET target-change message."""
        # Whatever changes have happened so far no longer matter.
        _LOGGER.debug("on_snapshot: target change: RESET")
        self._reset_docs()
    def _on_snapshot_target_change_current(self, proto):
        """Handle a CURRENT target-change message: results are up to date."""
        _LOGGER.debug("on_snapshot: target change: CURRENT")
        self.current = True
    def on_snapshot(self, proto):
        """
        Called everytime there is a response from listen. Collect changes
        and 'push' the changes in a batch to the customer when we receive
        'current' from the listen response.

        Args:
            proto (google.cloud.firestore_v1beta1.types.ListenResponse):
                The incoming message from the ``Listen`` stream.
        """
        TargetChange = firestore_pb2.TargetChange
        # Dispatch table: target-change type -> handler method.
        target_changetype_dispatch = {
            TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change,
            TargetChange.ADD: self._on_snapshot_target_change_add,
            TargetChange.REMOVE: self._on_snapshot_target_change_remove,
            TargetChange.RESET: self._on_snapshot_target_change_reset,
            TargetChange.CURRENT: self._on_snapshot_target_change_current,
        }
        target_change = proto.target_change
        # str(sub-message) is non-empty only when the field is actually set.
        if str(target_change):
            target_change_type = target_change.target_change_type
            _LOGGER.debug("on_snapshot: target change: " + str(target_change_type))
            meth = target_changetype_dispatch.get(target_change_type)
            if meth is None:
                _LOGGER.info(
                    "on_snapshot: Unknown target change " + str(target_change_type)
                )
                self.close(
                    reason="Unknown target change type: %s " % str(target_change_type)
                )
            else:
                try:
                    meth(proto)
                except Exception as exc2:
                    _LOGGER.debug("meth(proto) exc: " + str(exc2))
                    raise
            # NOTE:
            # in other implementations, such as node, the backoff is reset here
            # in this version bidi rpc is just used and will control this.
        elif str(proto.document_change):
            _LOGGER.debug("on_snapshot: document change")
            # No other target_ids can show up here, but we still need to see
            # if the targetId was in the added list or removed list.
            target_ids = proto.document_change.target_ids or []
            removed_target_ids = proto.document_change.removed_target_ids or []
            changed = False
            removed = False
            if WATCH_TARGET_ID in target_ids:
                changed = True
            if WATCH_TARGET_ID in removed_target_ids:
                removed = True
            if changed:
                _LOGGER.debug("on_snapshot: document change: CHANGED")
                # google.cloud.firestore_v1beta1.types.DocumentChange
                document_change = proto.document_change
                # google.cloud.firestore_v1beta1.types.Document
                document = document_change.document
                data = _helpers.decode_dict(document.fields, self._firestore)
                # Create a snapshot. As Document and Query objects can be
                # passed we need to get a Document Reference in a more manual
                # fashion than self._document_reference
                document_name = document.name
                db_str = self._firestore._database_string
                db_str_documents = db_str + "/documents/"
                if document_name.startswith(db_str_documents):
                    # Strip the database prefix to get the relative path.
                    document_name = document_name[len(db_str_documents) :]
                document_ref = self._firestore.document(document_name)
                snapshot = self.DocumentSnapshot(
                    reference=document_ref,
                    data=data,
                    exists=True,
                    read_time=None,
                    create_time=document.create_time,
                    update_time=document.update_time,
                )
                self.change_map[document.name] = snapshot
            elif removed:
                _LOGGER.debug("on_snapshot: document change: REMOVED")
                document = proto.document_change.document
                self.change_map[document.name] = ChangeType.REMOVED
        # NB: document_delete and document_remove (as far as we, the client,
        # are concerned) are functionally equivalent
        elif str(proto.document_delete):
            _LOGGER.debug("on_snapshot: document change: DELETE")
            name = proto.document_delete.document
            self.change_map[name] = ChangeType.REMOVED
        elif str(proto.document_remove):
            _LOGGER.debug("on_snapshot: document change: REMOVE")
            name = proto.document_remove.document
            self.change_map[name] = ChangeType.REMOVED
        elif proto.filter:
            _LOGGER.debug("on_snapshot: filter update")
            if proto.filter.count != self._current_size():
                # We need to remove all the current results.
                self._reset_docs()
                # The filter didn't match, so re-issue the query.
                # TODO: reset stream method?
                # self._reset_stream();
        else:
            _LOGGER.debug("UNKNOWN TYPE. UHOH")
            self.close(reason=ValueError("Unknown listen response type: %s" % proto))
    def push(self, read_time, next_resume_token):
        """
        Assembles a new snapshot from the current set of changes and invokes
        the user's callback. Clears the current changes on completion.

        Args:
            read_time: protobuf Timestamp at which the changes were read.
            next_resume_token (bytes): token from which the stream can be
                resumed after this point.
        """
        deletes, adds, updates = Watch._extract_changes(
            self.doc_map, self.change_map, read_time
        )
        updated_tree, updated_map, appliedChanges = self._compute_snapshot(
            self.doc_tree, self.doc_map, deletes, adds, updates
        )
        # Always deliver the first snapshot, even when nothing changed.
        if not self.has_pushed or len(appliedChanges):
            # TODO: It is possible in the future we will have the tree order
            # on insert. For now, we sort here.
            key = functools.cmp_to_key(self._comparator)
            keys = sorted(updated_tree.keys(), key=key)
            self._snapshot_callback(
                keys,
                appliedChanges,
                datetime.datetime.fromtimestamp(read_time.seconds, pytz.utc),
            )
            self.has_pushed = True
        self.doc_tree = updated_tree
        self.doc_map = updated_map
        self.change_map.clear()
        self.resume_token = next_resume_token
@staticmethod
def _extract_changes(doc_map, changes, read_time):
deletes = []
adds = []
updates = []
for name, value in changes.items():
if value == ChangeType.REMOVED:
if name in doc_map:
deletes.append(name)
elif name in doc_map:
if read_time is not None:
value.read_time = read_time
updates.append(value)
else:
if read_time is not None:
value.read_time = read_time
adds.append(value)
return (deletes, adds, updates)
    def _compute_snapshot(
        self, doc_tree, doc_map, delete_changes, add_changes, update_changes
    ):
        """Apply extracted changes to the previous snapshot state.

        Args:
            doc_tree (WatchDocTree): sorted tree of snapshots from the last
                push (only the keys are used).
            doc_map (dict): document name -> snapshot for the last push.
            delete_changes (List[str]): names of deleted documents.
            add_changes (List): snapshots of added documents.
            update_changes (List): snapshots of updated documents.

        Returns:
            Tuple[WatchDocTree, dict, List[DocumentChange]]: the updated
            tree, the updated map, and the ordered change events.
        """
        updated_tree = doc_tree
        updated_map = doc_map
        assert len(doc_tree) == len(doc_map), (
            "The document tree and document map should have the same "
            + "number of entries."
        )

        def delete_doc(name, updated_tree, updated_map):
            """
            Applies a document delete to the document tree and document map.
            Returns the corresponding DocumentChange event.
            """
            assert name in updated_map, "Document to delete does not exist"
            old_document = updated_map.get(name)
            # TODO: If a document doesn't exist this raises IndexError. Handle?
            existing = updated_tree.find(old_document)
            old_index = existing.index
            updated_tree = updated_tree.remove(old_document)
            del updated_map[name]
            return (
                DocumentChange(ChangeType.REMOVED, old_document, old_index, -1),
                updated_tree,
                updated_map,
            )

        def add_doc(new_document, updated_tree, updated_map):
            """
            Applies a document add to the document tree and the document map.
            Returns the corresponding DocumentChange event.
            """
            name = new_document.reference._document_path
            assert name not in updated_map, "Document to add already exists"
            updated_tree = updated_tree.insert(new_document, None)
            new_index = updated_tree.find(new_document).index
            updated_map[name] = new_document
            return (
                DocumentChange(ChangeType.ADDED, new_document, -1, new_index),
                updated_tree,
                updated_map,
            )

        def modify_doc(new_document, updated_tree, updated_map):
            """
            Applies a document modification to the document tree and the
            document map.
            Returns the DocumentChange event for successful modifications,
            or None when update_time is unchanged.
            """
            name = new_document.reference._document_path
            assert name in updated_map, "Document to modify does not exist"
            old_document = updated_map.get(name)
            if old_document.update_time != new_document.update_time:
                # Model the modification as a remove followed by an add so
                # the recorded indices match the sorted tree.
                remove_change, updated_tree, updated_map = delete_doc(
                    name, updated_tree, updated_map
                )
                add_change, updated_tree, updated_map = add_doc(
                    new_document, updated_tree, updated_map
                )
                return (
                    DocumentChange(
                        ChangeType.MODIFIED,
                        new_document,
                        remove_change.old_index,
                        add_change.new_index,
                    ),
                    updated_tree,
                    updated_map,
                )
            return None, updated_tree, updated_map

        # Process the sorted changes in the order that is expected by our
        # clients (removals, additions, and then modifications). We also need
        # to sort the individual changes to assure that old_index/new_index
        # keep incrementing.
        appliedChanges = []
        key = functools.cmp_to_key(self._comparator)
        # Deletes are sorted based on the order of the existing document.
        delete_changes = sorted(delete_changes, key=key)
        for name in delete_changes:
            change, updated_tree, updated_map = delete_doc(
                name, updated_tree, updated_map
            )
            appliedChanges.append(change)
        add_changes = sorted(add_changes, key=key)
        _LOGGER.debug("walk over add_changes")
        for snapshot in add_changes:
            _LOGGER.debug("in add_changes")
            change, updated_tree, updated_map = add_doc(
                snapshot, updated_tree, updated_map
            )
            appliedChanges.append(change)
        update_changes = sorted(update_changes, key=key)
        for snapshot in update_changes:
            change, updated_tree, updated_map = modify_doc(
                snapshot, updated_tree, updated_map
            )
            if change is not None:
                appliedChanges.append(change)
        assert len(updated_tree) == len(updated_map), (
            "The update document "
            + "tree and document map should have the same number of entries."
        )
        return (updated_tree, updated_map, appliedChanges)
def _affects_target(self, target_ids, current_id):
if target_ids is None:
return True
return current_id in target_ids
    def _current_size(self):
        """
        Returns the current count of all documents, including the changes
        from the current changeMap.
        """
        # read_time=None: we only need counts here, not stamped snapshots.
        deletes, adds, _ = Watch._extract_changes(self.doc_map, self.change_map, None)
        return len(self.doc_map) + len(adds) - len(deletes)
    def _reset_docs(self):
        """
        Helper to clear the docs on RESET or filter mismatch.
        """
        _LOGGER.debug("resetting documents")
        self.change_map.clear()
        self.resume_token = None
        # Mark each document as deleted. If documents are not deleted
        # they will be sent again by the server.
        for snapshot in self.doc_tree.keys():
            name = snapshot.reference._document_path
            self.change_map[name] = ChangeType.REMOVED
        self.current = False