Added delete option to database storage.
This commit is contained in:
parent
308604a33c
commit
963b5bc68b
1868 changed files with 192402 additions and 13278 deletions
71
venv/Lib/site-packages/google/cloud/firestore_v1/__init__.py
Normal file
71
venv/Lib/site-packages/google/cloud/firestore_v1/__init__.py
Normal file
|
@ -0,0 +1,71 @@
|
|||
# Copyright 2019 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Python idiomatic client for Google Cloud Firestore."""
|
||||
|
||||
from pkg_resources import get_distribution
|
||||
|
||||
__version__ = get_distribution("google-cloud-firestore").version
|
||||
|
||||
from google.cloud.firestore_v1 import types
|
||||
from google.cloud.firestore_v1._helpers import GeoPoint
|
||||
from google.cloud.firestore_v1._helpers import ExistsOption
|
||||
from google.cloud.firestore_v1._helpers import LastUpdateOption
|
||||
from google.cloud.firestore_v1._helpers import ReadAfterWriteError
|
||||
from google.cloud.firestore_v1._helpers import WriteOption
|
||||
from google.cloud.firestore_v1.batch import WriteBatch
|
||||
from google.cloud.firestore_v1.client import Client
|
||||
from google.cloud.firestore_v1.collection import CollectionReference
|
||||
from google.cloud.firestore_v1.transforms import ArrayRemove
|
||||
from google.cloud.firestore_v1.transforms import ArrayUnion
|
||||
from google.cloud.firestore_v1.transforms import DELETE_FIELD
|
||||
from google.cloud.firestore_v1.transforms import Increment
|
||||
from google.cloud.firestore_v1.transforms import Maximum
|
||||
from google.cloud.firestore_v1.transforms import Minimum
|
||||
from google.cloud.firestore_v1.transforms import SERVER_TIMESTAMP
|
||||
from google.cloud.firestore_v1.document import DocumentReference
|
||||
from google.cloud.firestore_v1.document import DocumentSnapshot
|
||||
from google.cloud.firestore_v1.gapic import enums
|
||||
from google.cloud.firestore_v1.query import Query
|
||||
from google.cloud.firestore_v1.transaction import Transaction
|
||||
from google.cloud.firestore_v1.transaction import transactional
|
||||
from google.cloud.firestore_v1.watch import Watch
|
||||
|
||||
|
||||
__all__ = [
|
||||
"__version__",
|
||||
"ArrayRemove",
|
||||
"ArrayUnion",
|
||||
"Client",
|
||||
"CollectionReference",
|
||||
"DELETE_FIELD",
|
||||
"DocumentReference",
|
||||
"DocumentSnapshot",
|
||||
"enums",
|
||||
"ExistsOption",
|
||||
"GeoPoint",
|
||||
"Increment",
|
||||
"LastUpdateOption",
|
||||
"Maximum",
|
||||
"Minimum",
|
||||
"Query",
|
||||
"ReadAfterWriteError",
|
||||
"SERVER_TIMESTAMP",
|
||||
"Transaction",
|
||||
"transactional",
|
||||
"types",
|
||||
"Watch",
|
||||
"WriteBatch",
|
||||
"WriteOption",
|
||||
]
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
1049
venv/Lib/site-packages/google/cloud/firestore_v1/_helpers.py
Normal file
1049
venv/Lib/site-packages/google/cloud/firestore_v1/_helpers.py
Normal file
File diff suppressed because it is too large
Load diff
160
venv/Lib/site-packages/google/cloud/firestore_v1/batch.py
Normal file
160
venv/Lib/site-packages/google/cloud/firestore_v1/batch.py
Normal file
|
@ -0,0 +1,160 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Helpers for batch requests to the Google Cloud Firestore API."""
|
||||
|
||||
|
||||
from google.cloud.firestore_v1 import _helpers
|
||||
|
||||
|
||||
class WriteBatch(object):
|
||||
"""Accumulate write operations to be sent in a batch.
|
||||
|
||||
This has the same set of methods for write operations that
|
||||
:class:`~google.cloud.firestore_v1.document.DocumentReference` does,
|
||||
e.g. :meth:`~google.cloud.firestore_v1.document.DocumentReference.create`.
|
||||
|
||||
Args:
|
||||
client (:class:`~google.cloud.firestore_v1.client.Client`):
|
||||
The client that created this batch.
|
||||
"""
|
||||
|
||||
def __init__(self, client):
|
||||
self._client = client
|
||||
self._write_pbs = []
|
||||
self.write_results = None
|
||||
self.commit_time = None
|
||||
|
||||
def _add_write_pbs(self, write_pbs):
|
||||
"""Add `Write`` protobufs to this transaction.
|
||||
|
||||
This method intended to be over-ridden by subclasses.
|
||||
|
||||
Args:
|
||||
write_pbs (List[google.cloud.proto.firestore.v1.\
|
||||
write_pb2.Write]): A list of write protobufs to be added.
|
||||
"""
|
||||
self._write_pbs.extend(write_pbs)
|
||||
|
||||
def create(self, reference, document_data):
|
||||
"""Add a "change" to this batch to create a document.
|
||||
|
||||
If the document given by ``reference`` already exists, then this
|
||||
batch will fail when :meth:`commit`-ed.
|
||||
|
||||
Args:
|
||||
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
|
||||
A document reference to be created in this batch.
|
||||
document_data (dict): Property names and values to use for
|
||||
creating a document.
|
||||
"""
|
||||
write_pbs = _helpers.pbs_for_create(reference._document_path, document_data)
|
||||
self._add_write_pbs(write_pbs)
|
||||
|
||||
def set(self, reference, document_data, merge=False):
|
||||
"""Add a "change" to replace a document.
|
||||
|
||||
See
|
||||
:meth:`google.cloud.firestore_v1.document.DocumentReference.set` for
|
||||
more information on how ``option`` determines how the change is
|
||||
applied.
|
||||
|
||||
Args:
|
||||
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
|
||||
A document reference that will have values set in this batch.
|
||||
document_data (dict):
|
||||
Property names and values to use for replacing a document.
|
||||
merge (Optional[bool] or Optional[List<apispec>]):
|
||||
If True, apply merging instead of overwriting the state
|
||||
of the document.
|
||||
"""
|
||||
if merge is not False:
|
||||
write_pbs = _helpers.pbs_for_set_with_merge(
|
||||
reference._document_path, document_data, merge
|
||||
)
|
||||
else:
|
||||
write_pbs = _helpers.pbs_for_set_no_merge(
|
||||
reference._document_path, document_data
|
||||
)
|
||||
|
||||
self._add_write_pbs(write_pbs)
|
||||
|
||||
def update(self, reference, field_updates, option=None):
|
||||
"""Add a "change" to update a document.
|
||||
|
||||
See
|
||||
:meth:`google.cloud.firestore_v1.document.DocumentReference.update`
|
||||
for more information on ``field_updates`` and ``option``.
|
||||
|
||||
Args:
|
||||
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
|
||||
A document reference that will be updated in this batch.
|
||||
field_updates (dict):
|
||||
Field names or paths to update and values to update with.
|
||||
option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]):
|
||||
A write option to make assertions / preconditions on the server
|
||||
state of the document before applying changes.
|
||||
"""
|
||||
if option.__class__.__name__ == "ExistsOption":
|
||||
raise ValueError("you must not pass an explicit write option to " "update.")
|
||||
write_pbs = _helpers.pbs_for_update(
|
||||
reference._document_path, field_updates, option
|
||||
)
|
||||
self._add_write_pbs(write_pbs)
|
||||
|
||||
def delete(self, reference, option=None):
|
||||
"""Add a "change" to delete a document.
|
||||
|
||||
See
|
||||
:meth:`google.cloud.firestore_v1.document.DocumentReference.delete`
|
||||
for more information on how ``option`` determines how the change is
|
||||
applied.
|
||||
|
||||
Args:
|
||||
reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
|
||||
A document reference that will be deleted in this batch.
|
||||
option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]):
|
||||
A write option to make assertions / preconditions on the server
|
||||
state of the document before applying changes.
|
||||
"""
|
||||
write_pb = _helpers.pb_for_delete(reference._document_path, option)
|
||||
self._add_write_pbs([write_pb])
|
||||
|
||||
def commit(self):
|
||||
"""Commit the changes accumulated in this batch.
|
||||
|
||||
Returns:
|
||||
List[:class:`google.cloud.proto.firestore.v1.write_pb2.WriteResult`, ...]:
|
||||
The write results corresponding to the changes committed, returned
|
||||
in the same order as the changes were applied to this batch. A
|
||||
write result contains an ``update_time`` field.
|
||||
"""
|
||||
commit_response = self._client._firestore_api.commit(
|
||||
self._client._database_string,
|
||||
self._write_pbs,
|
||||
transaction=None,
|
||||
metadata=self._client._rpc_metadata,
|
||||
)
|
||||
|
||||
self._write_pbs = []
|
||||
self.write_results = results = list(commit_response.write_results)
|
||||
self.commit_time = commit_response.commit_time
|
||||
return results
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
if exc_type is None:
|
||||
self.commit()
|
622
venv/Lib/site-packages/google/cloud/firestore_v1/client.py
Normal file
622
venv/Lib/site-packages/google/cloud/firestore_v1/client.py
Normal file
|
@ -0,0 +1,622 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Client for interacting with the Google Cloud Firestore API.
|
||||
|
||||
This is the base from which all interactions with the API occur.
|
||||
|
||||
In the hierarchy of API concepts
|
||||
|
||||
* a :class:`~google.cloud.firestore_v1.client.Client` owns a
|
||||
:class:`~google.cloud.firestore_v1.collection.CollectionReference`
|
||||
* a :class:`~google.cloud.firestore_v1.client.Client` owns a
|
||||
:class:`~google.cloud.firestore_v1.document.DocumentReference`
|
||||
"""
|
||||
import os
|
||||
|
||||
import google.api_core.client_options
|
||||
from google.api_core.gapic_v1 import client_info
|
||||
from google.cloud.client import ClientWithProject
|
||||
|
||||
from google.cloud.firestore_v1 import _helpers
|
||||
from google.cloud.firestore_v1 import __version__
|
||||
from google.cloud.firestore_v1 import query
|
||||
from google.cloud.firestore_v1 import types
|
||||
from google.cloud.firestore_v1.batch import WriteBatch
|
||||
from google.cloud.firestore_v1.collection import CollectionReference
|
||||
from google.cloud.firestore_v1.document import DocumentReference
|
||||
from google.cloud.firestore_v1.document import DocumentSnapshot
|
||||
from google.cloud.firestore_v1.field_path import render_field_path
|
||||
from google.cloud.firestore_v1.gapic import firestore_client
|
||||
from google.cloud.firestore_v1.gapic.transports import firestore_grpc_transport
|
||||
from google.cloud.firestore_v1.transaction import Transaction
|
||||
|
||||
|
||||
DEFAULT_DATABASE = "(default)"
|
||||
"""str: The default database used in a :class:`~google.cloud.firestore_v1.client.Client`."""
|
||||
_BAD_OPTION_ERR = (
|
||||
"Exactly one of ``last_update_time`` or ``exists`` " "must be provided."
|
||||
)
|
||||
_BAD_DOC_TEMPLATE = (
|
||||
"Document {!r} appeared in response but was not present among references"
|
||||
)
|
||||
_ACTIVE_TXN = "There is already an active transaction."
|
||||
_INACTIVE_TXN = "There is no active transaction."
|
||||
_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__)
|
||||
_FIRESTORE_EMULATOR_HOST = "FIRESTORE_EMULATOR_HOST"
|
||||
|
||||
|
||||
class Client(ClientWithProject):
|
||||
"""Client for interacting with Google Cloud Firestore API.
|
||||
|
||||
.. note::
|
||||
|
||||
Since the Cloud Firestore API requires the gRPC transport, no
|
||||
``_http`` argument is accepted by this class.
|
||||
|
||||
Args:
|
||||
project (Optional[str]): The project which the client acts on behalf
|
||||
of. If not passed, falls back to the default inferred
|
||||
from the environment.
|
||||
credentials (Optional[~google.auth.credentials.Credentials]): The
|
||||
OAuth2 Credentials to use for this client. If not passed, falls
|
||||
back to the default inferred from the environment.
|
||||
database (Optional[str]): The database name that the client targets.
|
||||
For now, :attr:`DEFAULT_DATABASE` (the default value) is the
|
||||
only valid database.
|
||||
client_info (Optional[google.api_core.gapic_v1.client_info.ClientInfo]):
|
||||
The client info used to send a user-agent string along with API
|
||||
requests. If ``None``, then default info will be used. Generally,
|
||||
you only need to set this if you're developing your own library
|
||||
or partner tool.
|
||||
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
|
||||
Client options used to set user options on the client. API Endpoint
|
||||
should be set through client_options.
|
||||
"""
|
||||
|
||||
SCOPE = (
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/datastore",
|
||||
)
|
||||
"""The scopes required for authenticating with the Firestore service."""
|
||||
|
||||
_firestore_api_internal = None
|
||||
_database_string_internal = None
|
||||
_rpc_metadata_internal = None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
project=None,
|
||||
credentials=None,
|
||||
database=DEFAULT_DATABASE,
|
||||
client_info=_CLIENT_INFO,
|
||||
client_options=None,
|
||||
):
|
||||
# NOTE: This API has no use for the _http argument, but sending it
|
||||
# will have no impact since the _http() @property only lazily
|
||||
# creates a working HTTP object.
|
||||
super(Client, self).__init__(
|
||||
project=project,
|
||||
credentials=credentials,
|
||||
client_options=client_options,
|
||||
_http=None,
|
||||
)
|
||||
self._client_info = client_info
|
||||
if client_options:
|
||||
if type(client_options) == dict:
|
||||
client_options = google.api_core.client_options.from_dict(
|
||||
client_options
|
||||
)
|
||||
self._client_options = client_options
|
||||
|
||||
self._database = database
|
||||
self._emulator_host = os.getenv(_FIRESTORE_EMULATOR_HOST)
|
||||
|
||||
@property
|
||||
def _firestore_api(self):
|
||||
"""Lazy-loading getter GAPIC Firestore API.
|
||||
|
||||
Returns:
|
||||
:class:`~google.cloud.gapic.firestore.v1`.firestore_client.FirestoreClient:
|
||||
<The GAPIC client with the credentials of the current client.
|
||||
"""
|
||||
if self._firestore_api_internal is None:
|
||||
# Use a custom channel.
|
||||
# We need this in order to set appropriate keepalive options.
|
||||
|
||||
if self._emulator_host is not None:
|
||||
channel = firestore_grpc_transport.firestore_pb2_grpc.grpc.insecure_channel(
|
||||
self._emulator_host
|
||||
)
|
||||
else:
|
||||
channel = firestore_grpc_transport.FirestoreGrpcTransport.create_channel(
|
||||
self._target,
|
||||
credentials=self._credentials,
|
||||
options={"grpc.keepalive_time_ms": 30000}.items(),
|
||||
)
|
||||
|
||||
self._transport = firestore_grpc_transport.FirestoreGrpcTransport(
|
||||
address=self._target, channel=channel
|
||||
)
|
||||
|
||||
self._firestore_api_internal = firestore_client.FirestoreClient(
|
||||
transport=self._transport, client_info=self._client_info
|
||||
)
|
||||
|
||||
return self._firestore_api_internal
|
||||
|
||||
@property
|
||||
def _target(self):
|
||||
"""Return the target (where the API is).
|
||||
|
||||
Returns:
|
||||
str: The location of the API.
|
||||
"""
|
||||
if self._emulator_host is not None:
|
||||
return self._emulator_host
|
||||
elif self._client_options and self._client_options.api_endpoint:
|
||||
return self._client_options.api_endpoint
|
||||
else:
|
||||
return firestore_client.FirestoreClient.SERVICE_ADDRESS
|
||||
|
||||
@property
|
||||
def _database_string(self):
|
||||
"""The database string corresponding to this client's project.
|
||||
|
||||
This value is lazy-loaded and cached.
|
||||
|
||||
Will be of the form
|
||||
|
||||
``projects/{project_id}/databases/{database_id}``
|
||||
|
||||
but ``database_id == '(default)'`` for the time being.
|
||||
|
||||
Returns:
|
||||
str: The fully-qualified database string for the current
|
||||
project. (The default database is also in this string.)
|
||||
"""
|
||||
if self._database_string_internal is None:
|
||||
# NOTE: database_root_path() is a classmethod, so we don't use
|
||||
# self._firestore_api (it isn't necessary).
|
||||
db_str = firestore_client.FirestoreClient.database_root_path(
|
||||
self.project, self._database
|
||||
)
|
||||
self._database_string_internal = db_str
|
||||
|
||||
return self._database_string_internal
|
||||
|
||||
@property
|
||||
def _rpc_metadata(self):
|
||||
"""The RPC metadata for this client's associated database.
|
||||
|
||||
Returns:
|
||||
Sequence[Tuple(str, str)]: RPC metadata with resource prefix
|
||||
for the database associated with this client.
|
||||
"""
|
||||
if self._rpc_metadata_internal is None:
|
||||
self._rpc_metadata_internal = _helpers.metadata_with_prefix(
|
||||
self._database_string
|
||||
)
|
||||
|
||||
if self._emulator_host is not None:
|
||||
# The emulator requires additional metadata to be set.
|
||||
self._rpc_metadata_internal.append(("authorization", "Bearer owner"))
|
||||
|
||||
return self._rpc_metadata_internal
|
||||
|
||||
def collection(self, *collection_path):
|
||||
"""Get a reference to a collection.
|
||||
|
||||
For a top-level collection:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.collection('top')
|
||||
|
||||
For a sub-collection:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.collection('mydocs/doc/subcol')
|
||||
>>> # is the same as
|
||||
>>> client.collection('mydocs', 'doc', 'subcol')
|
||||
|
||||
Sub-collections can be nested deeper in a similar fashion.
|
||||
|
||||
Args:
|
||||
collection_path (Tuple[str, ...]): Can either be
|
||||
|
||||
* A single ``/``-delimited path to a collection
|
||||
* A tuple of collection path segments
|
||||
|
||||
Returns:
|
||||
:class:`~google.cloud.firestore_v1.collection.CollectionReference`:
|
||||
A reference to a collection in the Firestore database.
|
||||
"""
|
||||
if len(collection_path) == 1:
|
||||
path = collection_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
|
||||
else:
|
||||
path = collection_path
|
||||
|
||||
return CollectionReference(*path, client=self)
|
||||
|
||||
def collection_group(self, collection_id):
|
||||
"""
|
||||
Creates and returns a new Query that includes all documents in the
|
||||
database that are contained in a collection or subcollection with the
|
||||
given collection_id.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> query = client.collection_group('mygroup')
|
||||
|
||||
@param {string} collectionId Identifies the collections to query over.
|
||||
Every collection or subcollection with this ID as the last segment of its
|
||||
path will be included. Cannot contain a slash.
|
||||
@returns {Query} The created Query.
|
||||
"""
|
||||
if "/" in collection_id:
|
||||
raise ValueError(
|
||||
"Invalid collection_id "
|
||||
+ collection_id
|
||||
+ ". Collection IDs must not contain '/'."
|
||||
)
|
||||
|
||||
collection = self.collection(collection_id)
|
||||
return query.Query(collection, all_descendants=True)
|
||||
|
||||
def document(self, *document_path):
|
||||
"""Get a reference to a document in a collection.
|
||||
|
||||
For a top-level document:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.document('collek/shun')
|
||||
>>> # is the same as
|
||||
>>> client.document('collek', 'shun')
|
||||
|
||||
For a document in a sub-collection:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
>>> client.document('mydocs/doc/subcol/child')
|
||||
>>> # is the same as
|
||||
>>> client.document('mydocs', 'doc', 'subcol', 'child')
|
||||
|
||||
Documents in sub-collections can be nested deeper in a similar fashion.
|
||||
|
||||
Args:
|
||||
document_path (Tuple[str, ...]): Can either be
|
||||
|
||||
* A single ``/``-delimited path to a document
|
||||
* A tuple of document path segments
|
||||
|
||||
Returns:
|
||||
:class:`~google.cloud.firestore_v1.document.DocumentReference`:
|
||||
A reference to a document in a collection.
|
||||
"""
|
||||
if len(document_path) == 1:
|
||||
path = document_path[0].split(_helpers.DOCUMENT_PATH_DELIMITER)
|
||||
else:
|
||||
path = document_path
|
||||
|
||||
# DocumentReference takes a relative path. Strip the database string if present.
|
||||
base_path = self._database_string + "/documents/"
|
||||
joined_path = _helpers.DOCUMENT_PATH_DELIMITER.join(path)
|
||||
if joined_path.startswith(base_path):
|
||||
joined_path = joined_path[len(base_path) :]
|
||||
path = joined_path.split(_helpers.DOCUMENT_PATH_DELIMITER)
|
||||
|
||||
return DocumentReference(*path, client=self)
|
||||
|
||||
@staticmethod
|
||||
def field_path(*field_names):
|
||||
"""Create a **field path** from a list of nested field names.
|
||||
|
||||
A **field path** is a ``.``-delimited concatenation of the field
|
||||
names. It is used to represent a nested field. For example,
|
||||
in the data
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
data = {
|
||||
'aa': {
|
||||
'bb': {
|
||||
'cc': 10,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
the field path ``'aa.bb.cc'`` represents the data stored in
|
||||
``data['aa']['bb']['cc']``.
|
||||
|
||||
Args:
|
||||
field_names (Tuple[str, ...]): The list of field names.
|
||||
|
||||
Returns:
|
||||
str: The ``.``-delimited field path.
|
||||
"""
|
||||
return render_field_path(field_names)
|
||||
|
||||
@staticmethod
|
||||
def write_option(**kwargs):
|
||||
"""Create a write option for write operations.
|
||||
|
||||
Write operations include :meth:`~google.cloud.DocumentReference.set`,
|
||||
:meth:`~google.cloud.DocumentReference.update` and
|
||||
:meth:`~google.cloud.DocumentReference.delete`.
|
||||
|
||||
One of the following keyword arguments must be provided:
|
||||
|
||||
* ``last_update_time`` (:class:`google.protobuf.timestamp_pb2.\
|
||||
Timestamp`): A timestamp. When set, the target document must
|
||||
exist and have been last updated at that time. Protobuf
|
||||
``update_time`` timestamps are typically returned from methods
|
||||
that perform write operations as part of a "write result"
|
||||
protobuf or directly.
|
||||
* ``exists`` (:class:`bool`): Indicates if the document being modified
|
||||
should already exist.
|
||||
|
||||
Providing no argument would make the option have no effect (so
|
||||
it is not allowed). Providing multiple would be an apparent
|
||||
contradiction, since ``last_update_time`` assumes that the
|
||||
document **was** updated (it can't have been updated if it
|
||||
doesn't exist) and ``exists`` indicate that it is unknown if the
|
||||
document exists or not.
|
||||
|
||||
Args:
|
||||
kwargs (Dict[str, Any]): The keyword arguments described above.
|
||||
|
||||
Raises:
|
||||
TypeError: If anything other than exactly one argument is
|
||||
provided by the caller.
|
||||
|
||||
Returns:
|
||||
:class:`~google.cloud.firestore_v1.client.WriteOption`:
|
||||
The option to be used to configure a write message.
|
||||
"""
|
||||
if len(kwargs) != 1:
|
||||
raise TypeError(_BAD_OPTION_ERR)
|
||||
|
||||
name, value = kwargs.popitem()
|
||||
if name == "last_update_time":
|
||||
return _helpers.LastUpdateOption(value)
|
||||
elif name == "exists":
|
||||
return _helpers.ExistsOption(value)
|
||||
else:
|
||||
extra = "{!r} was provided".format(name)
|
||||
raise TypeError(_BAD_OPTION_ERR, extra)
|
||||
|
||||
def get_all(self, references, field_paths=None, transaction=None):
|
||||
"""Retrieve a batch of documents.
|
||||
|
||||
.. note::
|
||||
|
||||
Documents returned by this method are not guaranteed to be
|
||||
returned in the same order that they are given in ``references``.
|
||||
|
||||
.. note::
|
||||
|
||||
If multiple ``references`` refer to the same document, the server
|
||||
will only return one result.
|
||||
|
||||
See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for
|
||||
more information on **field paths**.
|
||||
|
||||
If a ``transaction`` is used and it already has write operations
|
||||
added, this method cannot be used (i.e. read-after-write is not
|
||||
allowed).
|
||||
|
||||
Args:
|
||||
references (List[.DocumentReference, ...]): Iterable of document
|
||||
references to be retrieved.
|
||||
field_paths (Optional[Iterable[str, ...]]): An iterable of field
|
||||
paths (``.``-delimited list of field names) to use as a
|
||||
projection of document fields in the returned results. If
|
||||
no value is provided, all fields will be returned.
|
||||
transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]):
|
||||
An existing transaction that these ``references`` will be
|
||||
retrieved in.
|
||||
|
||||
Yields:
|
||||
.DocumentSnapshot: The next document snapshot that fulfills the
|
||||
query, or :data:`None` if the document does not exist.
|
||||
"""
|
||||
document_paths, reference_map = _reference_info(references)
|
||||
mask = _get_doc_mask(field_paths)
|
||||
response_iterator = self._firestore_api.batch_get_documents(
|
||||
self._database_string,
|
||||
document_paths,
|
||||
mask,
|
||||
transaction=_helpers.get_transaction_id(transaction),
|
||||
metadata=self._rpc_metadata,
|
||||
)
|
||||
|
||||
for get_doc_response in response_iterator:
|
||||
yield _parse_batch_get(get_doc_response, reference_map, self)
|
||||
|
||||
def collections(self):
|
||||
"""List top-level collections of the client's database.
|
||||
|
||||
Returns:
|
||||
Sequence[:class:`~google.cloud.firestore_v1.collection.CollectionReference`]:
|
||||
iterator of subcollections of the current document.
|
||||
"""
|
||||
iterator = self._firestore_api.list_collection_ids(
|
||||
"{}/documents".format(self._database_string), metadata=self._rpc_metadata
|
||||
)
|
||||
iterator.client = self
|
||||
iterator.item_to_value = _item_to_collection_ref
|
||||
return iterator
|
||||
|
||||
def batch(self):
|
||||
"""Get a batch instance from this client.
|
||||
|
||||
Returns:
|
||||
:class:`~google.cloud.firestore_v1.batch.WriteBatch`:
|
||||
A "write" batch to be used for accumulating document changes and
|
||||
sending the changes all at once.
|
||||
"""
|
||||
return WriteBatch(self)
|
||||
|
||||
def transaction(self, **kwargs):
|
||||
"""Get a transaction that uses this client.
|
||||
|
||||
See :class:`~google.cloud.firestore_v1.transaction.Transaction` for
|
||||
more information on transactions and the constructor arguments.
|
||||
|
||||
Args:
|
||||
kwargs (Dict[str, Any]): The keyword arguments (other than
|
||||
``client``) to pass along to the
|
||||
:class:`~google.cloud.firestore_v1.transaction.Transaction`
|
||||
constructor.
|
||||
|
||||
Returns:
|
||||
:class:`~google.cloud.firestore_v1.transaction.Transaction`:
|
||||
A transaction attached to this client.
|
||||
"""
|
||||
return Transaction(self, **kwargs)
|
||||
|
||||
|
||||
def _reference_info(references):
|
||||
"""Get information about document references.
|
||||
|
||||
Helper for :meth:`~google.cloud.firestore_v1.client.Client.get_all`.
|
||||
|
||||
Args:
|
||||
references (List[.DocumentReference, ...]): Iterable of document
|
||||
references.
|
||||
|
||||
Returns:
|
||||
Tuple[List[str, ...], Dict[str, .DocumentReference]]: A two-tuple of
|
||||
|
||||
* fully-qualified documents paths for each reference in ``references``
|
||||
* a mapping from the paths to the original reference. (If multiple
|
||||
``references`` contains multiple references to the same document,
|
||||
that key will be overwritten in the result.)
|
||||
"""
|
||||
document_paths = []
|
||||
reference_map = {}
|
||||
for reference in references:
|
||||
doc_path = reference._document_path
|
||||
document_paths.append(doc_path)
|
||||
reference_map[doc_path] = reference
|
||||
|
||||
return document_paths, reference_map
|
||||
|
||||
|
||||
def _get_reference(document_path, reference_map):
|
||||
"""Get a document reference from a dictionary.
|
||||
|
||||
This just wraps a simple dictionary look-up with a helpful error that is
|
||||
specific to :meth:`~google.cloud.firestore.client.Client.get_all`, the
|
||||
**public** caller of this function.
|
||||
|
||||
Args:
|
||||
document_path (str): A fully-qualified document path.
|
||||
reference_map (Dict[str, .DocumentReference]): A mapping (produced
|
||||
by :func:`_reference_info`) of fully-qualified document paths to
|
||||
document references.
|
||||
|
||||
Returns:
|
||||
.DocumentReference: The matching reference.
|
||||
|
||||
Raises:
|
||||
ValueError: If ``document_path`` has not been encountered.
|
||||
"""
|
||||
try:
|
||||
return reference_map[document_path]
|
||||
except KeyError:
|
||||
msg = _BAD_DOC_TEMPLATE.format(document_path)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
def _parse_batch_get(get_doc_response, reference_map, client):
|
||||
"""Parse a `BatchGetDocumentsResponse` protobuf.
|
||||
|
||||
Args:
|
||||
get_doc_response (~google.cloud.proto.firestore.v1.\
|
||||
firestore_pb2.BatchGetDocumentsResponse): A single response (from
|
||||
a stream) containing the "get" response for a document.
|
||||
reference_map (Dict[str, .DocumentReference]): A mapping (produced
|
||||
by :func:`_reference_info`) of fully-qualified document paths to
|
||||
document references.
|
||||
client (:class:`~google.cloud.firestore_v1.client.Client`):
|
||||
A client that has a document factory.
|
||||
|
||||
Returns:
|
||||
[.DocumentSnapshot]: The retrieved snapshot.
|
||||
|
||||
Raises:
|
||||
ValueError: If the response has a ``result`` field (a oneof) other
|
||||
than ``found`` or ``missing``.
|
||||
"""
|
||||
result_type = get_doc_response.WhichOneof("result")
|
||||
if result_type == "found":
|
||||
reference = _get_reference(get_doc_response.found.name, reference_map)
|
||||
data = _helpers.decode_dict(get_doc_response.found.fields, client)
|
||||
snapshot = DocumentSnapshot(
|
||||
reference,
|
||||
data,
|
||||
exists=True,
|
||||
read_time=get_doc_response.read_time,
|
||||
create_time=get_doc_response.found.create_time,
|
||||
update_time=get_doc_response.found.update_time,
|
||||
)
|
||||
elif result_type == "missing":
|
||||
reference = _get_reference(get_doc_response.missing, reference_map)
|
||||
snapshot = DocumentSnapshot(
|
||||
reference,
|
||||
None,
|
||||
exists=False,
|
||||
read_time=get_doc_response.read_time,
|
||||
create_time=None,
|
||||
update_time=None,
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"`BatchGetDocumentsResponse.result` (a oneof) had a field other "
|
||||
"than `found` or `missing` set, or was unset"
|
||||
)
|
||||
return snapshot
|
||||
|
||||
|
||||
def _get_doc_mask(field_paths):
    """Get a document mask if field paths are provided.

    Args:
        field_paths (Optional[Iterable[str, ...]]): An iterable of field
            paths (``.``-delimited list of field names) to use as a
            projection of document fields in the returned results.

    Returns:
        Optional[google.cloud.firestore_v1.types.DocumentMask]: A mask
        to project documents to a restricted set of field paths.
    """
    # ``None`` means "no projection" -- pass no mask at all to the API.
    return None if field_paths is None else types.DocumentMask(field_paths=field_paths)
||||
def _item_to_collection_ref(iterator, item):
|
||||
"""Convert collection ID to collection ref.
|
||||
|
||||
Args:
|
||||
iterator (google.api_core.page_iterator.GRPCIterator):
|
||||
iterator response
|
||||
item (str): ID of the collection
|
||||
"""
|
||||
return iterator.client.collection(item)
|
509
venv/Lib/site-packages/google/cloud/firestore_v1/collection.py
Normal file
509
venv/Lib/site-packages/google/cloud/firestore_v1/collection.py
Normal file
|
@ -0,0 +1,509 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Classes for representing collections for the Google Cloud Firestore API."""
|
||||
import random
|
||||
|
||||
import six
|
||||
|
||||
from google.cloud.firestore_v1 import _helpers
|
||||
from google.cloud.firestore_v1 import query as query_mod
|
||||
from google.cloud.firestore_v1.watch import Watch
|
||||
from google.cloud.firestore_v1 import document
|
||||
|
||||
_AUTO_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
|
||||
|
||||
class CollectionReference(object):
    """A reference to a collection in a Firestore database.

    The collection may already exist or this class can facilitate creation
    of documents within the collection.

    Args:
        path (Tuple[str, ...]): The components in the collection path.
            This is a series of strings representing each collection and
            sub-collection ID, as well as the document IDs for any documents
            that contain a sub-collection.
        kwargs (dict): The keyword arguments for the constructor. The only
            supported keyword is ``client`` and it must be a
            :class:`~google.cloud.firestore_v1.client.Client` if provided. It
            represents the client that created this collection reference.

    Raises:
        ValueError: if

            * the ``path`` is empty
            * there are an even number of elements
            * a collection ID in ``path`` is not a string
            * a document ID in ``path`` is not a string
        TypeError: If a keyword other than ``client`` is used.
    """

    def __init__(self, *path, **kwargs):
        _helpers.verify_path(path, is_collection=True)
        self._path = path
        self._client = kwargs.pop("client", None)
        if kwargs:
            raise TypeError(
                "Received unexpected arguments", kwargs, "Only `client` is supported"
            )

    def __eq__(self, other):
        # Two references are equal when they name the same path on the
        # same client.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._path == other._path and self._client == other._client

    @property
    def id(self):
        """The collection identifier.

        Returns:
            str: The last component of the path.
        """
        return self._path[-1]

    @property
    def parent(self):
        """Document that owns the current collection.

        Returns:
            Optional[:class:`~google.cloud.firestore_v1.document.DocumentReference`]:
            The parent document, if the current collection is not a
            top-level collection.
        """
        if len(self._path) == 1:
            # A single path element means a top-level collection: no parent.
            return None
        else:
            parent_path = self._path[:-1]
            return self._client.document(*parent_path)

    def document(self, document_id=None):
        """Create a sub-document underneath the current collection.

        Args:
            document_id (Optional[str]): The document identifier
                within the current collection. If not provided, will default
                to a random 20 character string composed of digits,
                uppercase and lowercase and letters.

        Returns:
            :class:`~google.cloud.firestore_v1.document.DocumentReference`:
            The child document.
        """
        if document_id is None:
            document_id = _auto_id()

        child_path = self._path + (document_id,)
        return self._client.document(*child_path)

    def _parent_info(self):
        """Get fully-qualified parent path and prefix for this collection.

        Returns:
            Tuple[str, str]: Pair of

            * the fully-qualified (with database and project) path to the
              parent of this collection (will either be the database path
              or a document path).
            * the prefix to a document in this collection.
        """
        parent_doc = self.parent
        if parent_doc is None:
            # Top-level collection: the parent is the database itself.
            parent_path = _helpers.DOCUMENT_PATH_DELIMITER.join(
                (self._client._database_string, "documents")
            )
        else:
            parent_path = parent_doc._document_path

        expected_prefix = _helpers.DOCUMENT_PATH_DELIMITER.join((parent_path, self.id))
        return parent_path, expected_prefix

    def add(self, document_data, document_id=None):
        """Create a document in the Firestore database with the provided data.

        Args:
            document_data (dict): Property names and values to use for
                creating the document.
            document_id (Optional[str]): The document identifier within the
                current collection. If not provided, an ID will be
                automatically assigned by the server (the assigned ID will be
                a random 20 character string composed of digits,
                uppercase and lowercase letters).

        Returns:
            Tuple[:class:`google.protobuf.timestamp_pb2.Timestamp`, \
                :class:`~google.cloud.firestore_v1.document.DocumentReference`]:
                Pair of

                * The ``update_time`` when the document was created/overwritten.
                * A document reference for the created document.

        Raises:
            ~google.cloud.exceptions.Conflict: If ``document_id`` is provided
                and the document already exists.
        """
        if document_id is None:
            document_id = _auto_id()

        document_ref = self.document(document_id)
        # ``create`` (rather than ``set``) ensures a Conflict error when the
        # document already exists.
        write_result = document_ref.create(document_data)
        return write_result.update_time, document_ref

    def list_documents(self, page_size=None):
        """List all subdocuments of the current collection.

        Args:
            page_size (Optional[int]): The maximum number of documents
                in each page of results from this request. Non-positive values
                are ignored. Defaults to a sensible value set by the API.

        Returns:
            Sequence[:class:`~google.cloud.firestore_v1.document.DocumentReference`]:
                iterator of subdocuments of the current collection. If the
                collection does not exist at the time of `snapshot`, the
                iterator will be empty
        """
        parent, _ = self._parent_info()

        iterator = self._client._firestore_api.list_documents(
            parent,
            self.id,
            page_size=page_size,
            # ``show_missing`` includes documents that have subcollections
            # but no fields of their own.
            show_missing=True,
            metadata=self._client._rpc_metadata,
        )
        iterator.collection = self
        iterator.item_to_value = _item_to_document_ref
        return iterator

    def select(self, field_paths):
        """Create a "select" query with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.select` for
        more information on this method.

        Args:
            field_paths (Iterable[str, ...]): An iterable of field paths
                (``.``-delimited list of field names) to use as a projection
                of document fields in the query results.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A "projected" query.
        """
        query = query_mod.Query(self)
        return query.select(field_paths)

    def where(self, field_path, op_string, value):
        """Create a "where" query with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.where` for
        more information on this method.

        Args:
            field_path (str): A field path (``.``-delimited list of
                field names) for the field to filter on.
            op_string (str): A comparison operation in the form of a string.
                Acceptable values are ``<``, ``<=``, ``==``, ``>=``
                and ``>``.
            value (Any): The value to compare the field against in the filter.
                If ``value`` is :data:`None` or a NaN, then ``==`` is the only
                allowed operation.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A filtered query.
        """
        query = query_mod.Query(self)
        return query.where(field_path, op_string, value)

    def order_by(self, field_path, **kwargs):
        """Create an "order by" query with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.order_by` for
        more information on this method.

        Args:
            field_path (str): A field path (``.``-delimited list of
                field names) on which to order the query results.
            kwargs (Dict[str, Any]): The keyword arguments to pass along
                to the query. The only supported keyword is ``direction``,
                see :meth:`~google.cloud.firestore_v1.query.Query.order_by`
                for more information.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            An "order by" query.
        """
        query = query_mod.Query(self)
        return query.order_by(field_path, **kwargs)

    def limit(self, count):
        """Create a limited query with this collection as parent.

        .. note::

           `limit` and `limit_to_last` are mutually exclusive.
           Setting `limit` will drop previously set `limit_to_last`.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.limit` for
        more information on this method.

        Args:
            count (int): Maximum number of documents to return that match
                the query.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A limited query.
        """
        query = query_mod.Query(self)
        return query.limit(count)

    def limit_to_last(self, count):
        """Create a limited to last query with this collection as parent.

        .. note::

           `limit` and `limit_to_last` are mutually exclusive.
           Setting `limit_to_last` will drop previously set `limit`.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.limit_to_last`
        for more information on this method.

        Args:
            count (int): Maximum number of documents to return that
                match the query.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A limited to last query.
        """
        query = query_mod.Query(self)
        return query.limit_to_last(count)

    def offset(self, num_to_skip):
        """Skip to an offset in a query with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.offset` for
        more information on this method.

        Args:
            num_to_skip (int): The number of results to skip at the beginning
                of query results. (Must be non-negative.)

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            An offset query.
        """
        query = query_mod.Query(self)
        return query.offset(num_to_skip)

    def start_at(self, document_fields):
        """Start query at a cursor with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.start_at` for
        more information on this method.

        Args:
            document_fields (Union[:class:`~google.cloud.firestore_v1.\
                document.DocumentSnapshot`, dict, list, tuple]):
                A document snapshot or a dictionary/list/tuple of fields
                representing a query results cursor. A cursor is a collection
                of values that represent a position in a query result set.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A query with cursor.
        """
        query = query_mod.Query(self)
        return query.start_at(document_fields)

    def start_after(self, document_fields):
        """Start query after a cursor with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.start_after` for
        more information on this method.

        Args:
            document_fields (Union[:class:`~google.cloud.firestore_v1.\
                document.DocumentSnapshot`, dict, list, tuple]):
                A document snapshot or a dictionary/list/tuple of fields
                representing a query results cursor. A cursor is a collection
                of values that represent a position in a query result set.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A query with cursor.
        """
        query = query_mod.Query(self)
        return query.start_after(document_fields)

    def end_before(self, document_fields):
        """End query before a cursor with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.end_before` for
        more information on this method.

        Args:
            document_fields (Union[:class:`~google.cloud.firestore_v1.\
                document.DocumentSnapshot`, dict, list, tuple]):
                A document snapshot or a dictionary/list/tuple of fields
                representing a query results cursor. A cursor is a collection
                of values that represent a position in a query result set.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A query with cursor.
        """
        query = query_mod.Query(self)
        return query.end_before(document_fields)

    def end_at(self, document_fields):
        """End query at a cursor with this collection as parent.

        See
        :meth:`~google.cloud.firestore_v1.query.Query.end_at` for
        more information on this method.

        Args:
            document_fields (Union[:class:`~google.cloud.firestore_v1.\
                document.DocumentSnapshot`, dict, list, tuple]):
                A document snapshot or a dictionary/list/tuple of fields
                representing a query results cursor. A cursor is a collection
                of values that represent a position in a query result set.

        Returns:
            :class:`~google.cloud.firestore_v1.query.Query`:
            A query with cursor.
        """
        query = query_mod.Query(self)
        return query.end_at(document_fields)

    def get(self, transaction=None):
        """Read the documents in this collection.

        This sends a ``RunQuery`` RPC and returns a list of documents
        returned in the stream of ``RunQueryResponse`` messages.

        Args:
            transaction
                (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]):
                An existing transaction that this query will run in.

        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).

        Returns:
            list: The documents in this collection that match the query.
        """
        query = query_mod.Query(self)
        return query.get(transaction=transaction)

    def stream(self, transaction=None):
        """Read the documents in this collection.

        This sends a ``RunQuery`` RPC and then returns an iterator which
        consumes each document returned in the stream of ``RunQueryResponse``
        messages.

        .. note::

           The underlying stream of responses will time out after
           the ``max_rpc_timeout_millis`` value set in the GAPIC
           client configuration for the ``RunQuery`` API. Snapshots
           not consumed from the iterator before that point will be lost.

        If a ``transaction`` is used and it already has write operations
        added, this method cannot be used (i.e. read-after-write is not
        allowed).

        Args:
            transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.\
                Transaction`]):
                An existing transaction that the query will run in.

        Yields:
            :class:`~google.cloud.firestore_v1.document.DocumentSnapshot`:
            The next document that fulfills the query.
        """
        query = query_mod.Query(self)
        return query.stream(transaction=transaction)

    def on_snapshot(self, callback):
        """Monitor the documents in this collection.

        This starts a watch on this collection using a background thread. The
        provided callback is run on the snapshot of the documents.

        Args:
            callback (Callable[List[:class:`~google.cloud.firestore_v1.document.DocumentSnapshot`], \
                List[:class:`~google.cloud.firestore_v1.watch.DocumentChange`], datetime.datetime], NoneType):
                a callback to run when a change occurs.

        Example:
            from google.cloud import firestore_v1

            db = firestore_v1.Client()
            collection_ref = db.collection(u'users')

            def on_snapshot(docs, changes, read_time):
                for doc in docs:
                    print(u'{} => {}'.format(doc.id, doc.to_dict()))

            # Watch this collection
            collection_watch = collection_ref.on_snapshot(on_snapshot)

            # Terminate this watch
            collection_watch.unsubscribe()
        """
        # A collection watch is implemented as a watch on the implicit
        # "all documents" query over the collection.
        return Watch.for_query(
            query_mod.Query(self),
            callback,
            document.DocumentSnapshot,
            document.DocumentReference,
        )
||||
|
||||
|
||||
def _auto_id():
    """Generate a "random" automatically generated ID.

    Returns:
        str: A 20 character string composed of digits, uppercase and
        lowercase and letters.
    """
    picked = (random.choice(_AUTO_ID_CHARS) for _ in six.moves.xrange(20))
    return "".join(picked)
|
||||
def _item_to_document_ref(iterator, item):
    """Convert Document resource to document ref.

    Args:
        iterator (google.api_core.page_iterator.GRPCIterator):
            iterator response
        item (dict): document resource
    """
    # The final path segment of the resource name is the document ID.
    _, _, document_id = item.name.rpartition(_helpers.DOCUMENT_PATH_DELIMITER)
    return iterator.collection.document(document_id)
787
venv/Lib/site-packages/google/cloud/firestore_v1/document.py
Normal file
787
venv/Lib/site-packages/google/cloud/firestore_v1/document.py
Normal file
|
@ -0,0 +1,787 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Classes for representing documents for the Google Cloud Firestore API."""
|
||||
|
||||
import copy
|
||||
|
||||
import six
|
||||
|
||||
from google.api_core import exceptions
|
||||
from google.cloud.firestore_v1 import _helpers
|
||||
from google.cloud.firestore_v1 import field_path as field_path_module
|
||||
from google.cloud.firestore_v1.proto import common_pb2
|
||||
from google.cloud.firestore_v1.watch import Watch
|
||||
|
||||
|
||||
class DocumentReference(object):
|
||||
"""A reference to a document in a Firestore database.
|
||||
|
||||
The document may already exist or can be created by this class.
|
||||
|
||||
Args:
|
||||
path (Tuple[str, ...]): The components in the document path.
|
||||
This is a series of strings representing each collection and
|
||||
sub-collection ID, as well as the document IDs for any documents
|
||||
that contain a sub-collection (as well as the base document).
|
||||
kwargs (dict): The keyword arguments for the constructor. The only
|
||||
supported keyword is ``client`` and it must be a
|
||||
:class:`~google.cloud.firestore_v1.client.Client`. It represents
|
||||
the client that created this document reference.
|
||||
|
||||
Raises:
|
||||
ValueError: if
|
||||
|
||||
* the ``path`` is empty
|
||||
* there are an even number of elements
|
||||
* a collection ID in ``path`` is not a string
|
||||
* a document ID in ``path`` is not a string
|
||||
TypeError: If a keyword other than ``client`` is used.
|
||||
"""
|
||||
|
||||
_document_path_internal = None
|
||||
|
||||
    def __init__(self, *path, **kwargs):
        # Validate that ``path`` names a document (odd number of elements,
        # all strings) before storing it.
        _helpers.verify_path(path, is_collection=False)
        self._path = path
        # ``client`` may legitimately be ``None``; ``_document_path`` raises
        # if it is still unset when a full path is needed.
        self._client = kwargs.pop("client", None)
        if kwargs:
            raise TypeError(
                "Received unexpected arguments", kwargs, "Only `client` is supported"
            )
||||
    def __copy__(self):
        """Shallow copy the instance.

        We leave the client "as-is" but tuple-unpack the path.

        Returns:
            .DocumentReference: A copy of the current document.
        """
        result = self.__class__(*self._path, client=self._client)
        # Carry over the cached full path so the copy need not recompute it.
        result._document_path_internal = self._document_path_internal
        return result
||||
    def __deepcopy__(self, unused_memo):
        """Deep copy the instance.

        This isn't a true deep copy, we leave the client "as-is" but
        tuple-unpack the path.

        Returns:
            .DocumentReference: A copy of the current document.
        """
        return self.__copy__()
||||
    def __eq__(self, other):
        """Equality check against another instance.

        Two references are equal when they name the same path on the same
        client.

        Args:
            other (Any): A value to compare against.

        Returns:
            Union[bool, NotImplementedType]: Indicating if the values are
            equal.
        """
        if isinstance(other, DocumentReference):
            return self._client == other._client and self._path == other._path
        else:
            return NotImplemented
||||
    def __hash__(self):
        """Return a hash combining the path and client.

        Consistent with :meth:`__eq__`: references that compare equal
        hash equally.
        """
        return hash(self._path) + hash(self._client)
||||
    def __ne__(self, other):
        """Inequality check against another instance.

        Explicitly defined (rather than derived from ``__eq__``) for
        Python 2 compatibility.

        Args:
            other (Any): A value to compare against.

        Returns:
            Union[bool, NotImplementedType]: Indicating if the values are
            not equal.
        """
        if isinstance(other, DocumentReference):
            return self._client != other._client or self._path != other._path
        else:
            return NotImplemented
||||
    @property
    def path(self):
        """Database-relative path for this document.

        Returns:
            str: The document's relative path (``/``-joined components).
        """
        return "/".join(self._path)
||||
    @property
    def _document_path(self):
        """Create and cache the full path for this document.

        Of the form:

            ``projects/{project_id}/databases/{database_id}/...
            documents/{document_path}``

        Returns:
            str: The full document path.

        Raises:
            ValueError: If the current document reference has no ``client``.
        """
        # Compute lazily on first access and memoize on the instance.
        if self._document_path_internal is None:
            if self._client is None:
                raise ValueError("A document reference requires a `client`.")
            self._document_path_internal = _get_document_path(self._client, self._path)

        return self._document_path_internal
||||
    @property
    def id(self):
        """The document identifier (within its collection).

        Returns:
            str: The last component of the path.
        """
        return self._path[-1]
||||
    @property
    def parent(self):
        """Collection that owns the current document.

        Every document has a parent collection, so (unlike
        ``CollectionReference.parent``) this never returns ``None``.

        Returns:
            :class:`~google.cloud.firestore_v1.collection.CollectionReference`:
            The parent collection.
        """
        parent_path = self._path[:-1]
        return self._client.collection(*parent_path)
||||
    def collection(self, collection_id):
        """Create a sub-collection underneath the current document.

        Args:
            collection_id (str): The sub-collection identifier (sometimes
                referred to as the "kind").

        Returns:
            :class:`~google.cloud.firestore_v1.collection.CollectionReference`:
            The child collection.
        """
        child_path = self._path + (collection_id,)
        return self._client.collection(*child_path)
||||
    def create(self, document_data):
        """Create the current document in the Firestore database.

        Args:
            document_data (dict): Property names and values to use for
                creating a document.

        Returns:
            :class:`~google.cloud.firestore_v1.types.WriteResult`:
                The write result corresponding to the committed document.
                A write result contains an ``update_time`` field.

        Raises:
            :class:`~google.cloud.exceptions.Conflict`:
                If the document already exists.
        """
        # Delegate to a single-write batch so create/set/update share one
        # commit code path.
        batch = self._client.batch()
        batch.create(self, document_data)
        write_results = batch.commit()
        return _first_write_result(write_results)
||||
    def set(self, document_data, merge=False):
        """Replace the current document in the Firestore database.

        A write ``option`` can be specified to indicate preconditions of
        the "set" operation. If no ``option`` is specified and this document
        doesn't exist yet, this method will create it.

        Overwrites all content for the document with the fields in
        ``document_data``. This method performs almost the same functionality
        as :meth:`create`. The only difference is that this method doesn't
        make any requirements on the existence of the document (unless
        ``option`` is used), whereas :meth:`create` will fail if the
        document already exists.

        Args:
            document_data (dict): Property names and values to use for
                replacing a document.
            merge (Optional[bool] or Optional[List<apispec>]):
                If True, apply merging instead of overwriting the state
                of the document.

        Returns:
            :class:`~google.cloud.firestore_v1.types.WriteResult`:
            The write result corresponding to the committed document. A write
            result contains an ``update_time`` field.
        """
        batch = self._client.batch()
        batch.set(self, document_data, merge=merge)
        write_results = batch.commit()
        return _first_write_result(write_results)
||||
    def update(self, field_updates, option=None):
        """Update an existing document in the Firestore database.

        By default, this method verifies that the document exists on the
        server before making updates. A write ``option`` can be specified to
        override these preconditions.

        Each key in ``field_updates`` can either be a field name or a
        **field path** (For more information on **field paths**, see
        :meth:`~google.cloud.firestore_v1.client.Client.field_path`.) To
        illustrate this, consider a document with

        .. code-block:: python

           >>> snapshot = document.get()
           >>> snapshot.to_dict()
           {
               'foo': {
                   'bar': 'baz',
               },
               'other': True,
           }

        stored on the server. If the field name is used in the update:

        .. code-block:: python

           >>> field_updates = {
           ...     'foo': {
           ...         'quux': 800,
           ...     },
           ... }
           >>> document.update(field_updates)

        then all of ``foo`` will be overwritten on the server and the new
        value will be

        .. code-block:: python

           >>> snapshot = document.get()
           >>> snapshot.to_dict()
           {
               'foo': {
                   'quux': 800,
               },
               'other': True,
           }

        On the other hand, if a ``.``-delimited **field path** is used in the
        update:

        .. code-block:: python

           >>> field_updates = {
           ...     'foo.quux': 800,
           ... }
           >>> document.update(field_updates)

        then only ``foo.quux`` will be updated on the server and the
        field ``foo.bar`` will remain intact:

        .. code-block:: python

           >>> snapshot = document.get()
           >>> snapshot.to_dict()
           {
               'foo': {
                   'bar': 'baz',
                   'quux': 800,
               },
               'other': True,
           }

        .. warning::

           A **field path** can only be used as a top-level key in
           ``field_updates``.

        To delete / remove a field from an existing document, use the
        :attr:`~google.cloud.firestore_v1.transforms.DELETE_FIELD` sentinel.
        So with the example above, sending

        .. code-block:: python

           >>> field_updates = {
           ...     'other': firestore.DELETE_FIELD,
           ... }
           >>> document.update(field_updates)

        would update the value on the server to:

        .. code-block:: python

           >>> snapshot = document.get()
           >>> snapshot.to_dict()
           {
               'foo': {
                   'bar': 'baz',
               },
           }

        To set a field to the current time on the server when the
        update is received, use the
        :attr:`~google.cloud.firestore_v1.transforms.SERVER_TIMESTAMP`
        sentinel.
        Sending

        .. code-block:: python

           >>> field_updates = {
           ...     'foo.now': firestore.SERVER_TIMESTAMP,
           ... }
           >>> document.update(field_updates)

        would update the value on the server to:

        .. code-block:: python

           >>> snapshot = document.get()
           >>> snapshot.to_dict()
           {
               'foo': {
                   'bar': 'baz',
                   'now': datetime.datetime(2012, ...),
               },
               'other': True,
           }

        Args:
            field_updates (dict): Field names or paths to update and values
                to update with.
            option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]):
                A write option to make assertions / preconditions on the server
                state of the document before applying changes.

        Returns:
            :class:`~google.cloud.firestore_v1.types.WriteResult`:
            The write result corresponding to the updated document. A write
            result contains an ``update_time`` field.

        Raises:
            ~google.cloud.exceptions.NotFound: If the document does not exist.
        """
        # Delegate to a single-write batch, mirroring create()/set().
        batch = self._client.batch()
        batch.update(self, field_updates, option=option)
        write_results = batch.commit()
        return _first_write_result(write_results)
||||
def delete(self, option=None):
    """Remove this document from the Firestore database.

    Args:
        option (Optional[:class:`~google.cloud.firestore_v1.client.WriteOption`]):
            A write option to make assertions / preconditions on the server
            state of the document before applying changes.

    Returns:
        :class:`google.protobuf.timestamp_pb2.Timestamp`:
            The time that the delete request was received by the server.
            If the document did not exist when the delete was sent (i.e.
            nothing was deleted), this method will still succeed and will
            still return the time that the request was received by the
            server.
    """
    delete_write = _helpers.pb_for_delete(self._document_path, option)
    response = self._client._firestore_api.commit(
        self._client._database_string,
        [delete_write],
        transaction=None,
        metadata=self._client._rpc_metadata,
    )
    return response.commit_time
|
||||
|
||||
def get(self, field_paths=None, transaction=None):
    """Retrieve a snapshot of the current document.

    See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for
    more information on **field paths**.

    If a ``transaction`` is used and it already has write operations
    added, this method cannot be used (i.e. read-after-write is not
    allowed).

    Args:
        field_paths (Optional[Iterable[str, ...]]): An iterable of field
            paths (``.``-delimited list of field names) to use as a
            projection of document fields in the returned results. If
            no value is provided, all fields will be returned.
        transaction (Optional[:class:`~google.cloud.firestore_v1.transaction.Transaction`]):
            An existing transaction that this reference will be
            retrieved in.

    Returns:
        :class:`~google.cloud.firestore_v1.document.DocumentSnapshot`:
            A snapshot of the current document. If the document does not
            exist at the time the snapshot is taken, the snapshot's
            :attr:`reference`, :attr:`data`, :attr:`update_time`, and
            :attr:`create_time` attributes will all be ``None`` and
            its :attr:`exists` attribute will be ``False``.
    """
    if isinstance(field_paths, six.string_types):
        raise ValueError("'field_paths' must be a sequence of paths, not a string.")

    mask = None
    if field_paths is not None:
        mask = common_pb2.DocumentMask(field_paths=sorted(field_paths))

    # Defaults describe a missing document; overwritten on a successful read.
    data = None
    exists = False
    create_time = None
    update_time = None
    try:
        document_pb = self._client._firestore_api.get_document(
            self._document_path,
            mask=mask,
            transaction=_helpers.get_transaction_id(transaction),
            metadata=self._client._rpc_metadata,
        )
    except exceptions.NotFound:
        pass
    else:
        data = _helpers.decode_dict(document_pb.fields, self._client)
        exists = True
        create_time = document_pb.create_time
        update_time = document_pb.update_time

    return DocumentSnapshot(
        reference=self,
        data=data,
        exists=exists,
        read_time=None,  # the get_document RPC does not report a read time
        create_time=create_time,
        update_time=update_time,
    )
|
||||
|
||||
def collections(self, page_size=None):
    """List the immediate subcollections of the current document.

    Args:
        page_size (Optional[int]]): The maximum number of collections
            in each page of results from this request. Non-positive
            values are ignored. Defaults to a sensible value set by
            the API.

    Returns:
        Sequence[:class:`~google.cloud.firestore_v1.collection.CollectionReference`]:
            iterator of subcollections of the current document. If the
            document does not exist at the time of `snapshot`, the
            iterator will be empty
    """
    collection_ids = self._client._firestore_api.list_collection_ids(
        self._document_path,
        page_size=page_size,
        metadata=self._client._rpc_metadata,
    )
    # Attach conversion hooks so each page item is yielded as a
    # CollectionReference rather than a bare collection-ID string.
    collection_ids.document = self
    collection_ids.item_to_value = _item_to_collection_ref
    return collection_ids
|
||||
|
||||
def on_snapshot(self, callback):
    """Watch this document for changes.

    A watch on this document is started in a background thread; the
    provided callback is invoked with each new snapshot.

    Args:
        callback (Callable[List[:class:`~google.cloud.firestore_v1.document.DocumentSnapshot`], \
            List[:class:`~google.cloud.firestore_v1.watch.DocumentChange`], datetime.datetime], NoneType):
            a callback to run when a change occurs

    Example:

        .. code-block:: python

            from google.cloud import firestore_v1

            db = firestore_v1.Client()

            def on_snapshot(docs, changes, read_time):
                for doc in docs:
                    print(u'{} => {}'.format(doc.id, doc.to_dict()))

            doc_ref = db.collection(u'users').document(
                u'alovelace' + unique_resource_id())

            # Watch this document
            doc_watch = doc_ref.on_snapshot(on_snapshot)

            # Terminate this watch
            doc_watch.unsubscribe()
    """
    return Watch.for_document(self, callback, DocumentSnapshot, DocumentReference)
|
||||
|
||||
|
||||
class DocumentSnapshot(object):
    """A point-in-time view of a single document's data.

    The data was retrieved at a specific time and may not contain all
    fields stored for the document (a projection of fields may have been
    requested).

    Instances of this class are not intended to be constructed by hand;
    they are returned from methods such as
    :meth:`~google.cloud.DocumentReference.get`.

    Args:
        reference (:class:`~google.cloud.firestore_v1.document.DocumentReference`):
            A document reference corresponding to the document that
            contains the data in this snapshot.
        data (Dict[str, Any]):
            The data retrieved in the snapshot.
        exists (bool):
            Indicates if the document existed at the time the snapshot
            was retrieved.
        read_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
            The time that this snapshot was read from the server.
        create_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
            The time that this document was created.
        update_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
            The time that this document was last updated.
    """

    def __init__(self, reference, data, exists, read_time, create_time, update_time):
        self._reference = reference
        # Deep-copy so later mutation by the caller cannot change the
        # snapshot out from under us.
        self._data = copy.deepcopy(data)
        self._exists = exists
        self.read_time = read_time
        self.create_time = create_time
        self.update_time = update_time

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self._reference == other._reference and self._data == other._data

    def __hash__(self):
        # NOTE(review): hash is derived from the update timestamp, not
        # from the data used by __eq__ — equal snapshots with different
        # update times would hash differently; preserved as-is.
        return (
            hash(self._reference)
            + hash(self.update_time.seconds)
            + hash(self.update_time.nanos)
        )

    @property
    def _client(self):
        """The client that owns the document reference for this snapshot.

        Returns:
            :class:`~google.cloud.firestore_v1.client.Client`:
                The client that owns this document.
        """
        return self._reference._client

    @property
    def exists(self):
        """Existence flag.

        Indicates if the document existed at the time this snapshot
        was retrieved.

        Returns:
            bool: The existence flag.
        """
        return self._exists

    @property
    def id(self):
        """The document identifier (within its collection).

        Returns:
            str: The last component of the path of the document.
        """
        return self._reference.id

    @property
    def reference(self):
        """Document reference corresponding to document that owns this data.

        Returns:
            :class:`~google.cloud.firestore_v1.document.DocumentReference`:
                A document reference corresponding to this document.
        """
        return self._reference

    def get(self, field_path):
        """Get a (possibly nested) value from the snapshot data.

        A **field path** (``.``-delimited list of field names, e.g.
        ``'top1.middle2.bottom3'``) addresses nested data; see
        :meth:`~google.cloud.firestore_v1.client.Client.field_path` for
        more information on **field paths**.

        A copy is returned since the data may contain mutable values,
        but the data stored in the snapshot must remain immutable.

        Args:
            field_path (str): A field path (``.``-delimited list of
                field names).

        Returns:
            Any or None:
                (A copy of) the value stored for the ``field_path`` or
                None if snapshot document does not exist.

        Raises:
            KeyError: If the ``field_path`` does not match nested data
                in the snapshot.
        """
        if not self._exists:
            return None
        value = field_path_module.get_nested_value(field_path, self._data)
        return copy.deepcopy(value)

    def to_dict(self):
        """Retrieve the data contained in this snapshot.

        A copy is returned since the data may contain mutable values,
        but the data stored in the snapshot must remain immutable.

        Returns:
            Dict[str, Any] or None:
                The data in the snapshot. Returns None if reference
                does not exist.
        """
        if not self._exists:
            return None
        return copy.deepcopy(self._data)
|
||||
|
||||
|
||||
def _get_document_path(client, path):
    """Convert a path tuple into a full path string.

    Of the form:

        ``projects/{project_id}/databases/{database_id}/...
          documents/{document_path}``

    Args:
        client (:class:`~google.cloud.firestore_v1.client.Client`):
            The client that holds configuration details and a GAPIC
            client object.
        path (Tuple[str, ...]): The components in a document path.

    Returns:
        str: The fully-qualified document path.
    """
    components = (client._database_string, "documents") + path
    return _helpers.DOCUMENT_PATH_DELIMITER.join(components)
|
||||
|
||||
|
||||
def _consume_single_get(response_iterator):
|
||||
"""Consume a gRPC stream that should contain a single response.
|
||||
|
||||
The stream will correspond to a ``BatchGetDocuments`` request made
|
||||
for a single document.
|
||||
|
||||
Args:
|
||||
response_iterator (~google.cloud.exceptions.GrpcRendezvous): A
|
||||
streaming iterator returned from a ``BatchGetDocuments``
|
||||
request.
|
||||
|
||||
Returns:
|
||||
~google.cloud.proto.firestore.v1.\
|
||||
firestore_pb2.BatchGetDocumentsResponse: The single "get"
|
||||
response in the batch.
|
||||
|
||||
Raises:
|
||||
ValueError: If anything other than exactly one response is returned.
|
||||
"""
|
||||
# Calling ``list()`` consumes the entire iterator.
|
||||
all_responses = list(response_iterator)
|
||||
if len(all_responses) != 1:
|
||||
raise ValueError(
|
||||
"Unexpected response from `BatchGetDocumentsResponse`",
|
||||
all_responses,
|
||||
"Expected only one result",
|
||||
)
|
||||
|
||||
return all_responses[0]
|
||||
|
||||
|
||||
def _first_write_result(write_results):
|
||||
"""Get first write result from list.
|
||||
|
||||
For cases where ``len(write_results) > 1``, this assumes the writes
|
||||
occurred at the same time (e.g. if an update and transform are sent
|
||||
at the same time).
|
||||
|
||||
Args:
|
||||
write_results (List[google.cloud.proto.firestore.v1.\
|
||||
write_pb2.WriteResult, ...]: The write results from a
|
||||
``CommitResponse``.
|
||||
|
||||
Returns:
|
||||
google.cloud.firestore_v1.types.WriteResult: The
|
||||
lone write result from ``write_results``.
|
||||
|
||||
Raises:
|
||||
ValueError: If there are zero write results. This is likely to
|
||||
**never** occur, since the backend should be stable.
|
||||
"""
|
||||
if not write_results:
|
||||
raise ValueError("Expected at least one write result")
|
||||
|
||||
return write_results[0]
|
||||
|
||||
|
||||
def _item_to_collection_ref(iterator, item):
|
||||
"""Convert collection ID to collection ref.
|
||||
|
||||
Args:
|
||||
iterator (google.api_core.page_iterator.GRPCIterator):
|
||||
iterator response
|
||||
item (str): ID of the collection
|
||||
"""
|
||||
return iterator.document.collection(item)
|
395
venv/Lib/site-packages/google/cloud/firestore_v1/field_path.py
Normal file
395
venv/Lib/site-packages/google/cloud/firestore_v1/field_path.py
Normal file
|
@ -0,0 +1,395 @@
|
|||
# Copyright 2018 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities for managing / converting field paths to / from strings."""
|
||||
|
||||
try:
|
||||
from collections import abc as collections_abc
|
||||
except ImportError: # Python 2.7
|
||||
import collections as collections_abc
|
||||
|
||||
import re
|
||||
|
||||
import six
|
||||
|
||||
|
||||
# Error-message templates for get_nested_value() lookup failures.
_FIELD_PATH_MISSING_TOP = "{!r} is not contained in the data"
_FIELD_PATH_MISSING_KEY = "{!r} is not contained in the data for the key {!r}"
_FIELD_PATH_WRONG_TYPE = (
    "The data at {!r} is not a dictionary, so it cannot contain the key {!r}"
)

# Characters used when rendering / parsing field-path strings: elements
# are joined with ".", and non-simple element names are wrapped in
# backticks with internal backticks / backslashes escaped by a backslash.
_FIELD_PATH_DELIMITER = "."
_BACKSLASH = "\\"
_ESCAPED_BACKSLASH = _BACKSLASH * 2
_BACKTICK = "`"
_ESCAPED_BACKTICK = _BACKSLASH + _BACKTICK

# A "simple" field name needs no quoting: identifier-like, per the API.
_SIMPLE_FIELD_NAME = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*$")
# Matches an element that starts like an identifier but then contains an
# invalid character (used by FieldPath.from_string validation).
_LEADING_ALPHA_INVALID = re.compile("^[_a-zA-Z][_a-zA-Z0-9]*[^_a-zA-Z0-9]")
# Token grammar for lexing a field-path string; each (name, pattern) pair
# becomes a named group in TOKENS_REGEX, consulted via ``match.lastgroup``.
PATH_ELEMENT_TOKENS = [
    ("SIMPLE", r"[_a-zA-Z][_a-zA-Z0-9]*"),  # unquoted elements
    ("QUOTED", r"`(?:\\`|[^`])*?`"),  # quoted elements, unquoted
    ("DOT", r"\."),  # separator
]
TOKENS_PATTERN = "|".join("(?P<{}>{})".format(*pair) for pair in PATH_ELEMENT_TOKENS)
TOKENS_REGEX = re.compile(TOKENS_PATTERN)
|
||||
|
||||
|
||||
def _tokenize_field_path(path):
    """Lex a field path into its raw tokens (including dots).

    Args:
        path (str): field path to be lexed.

    Yields:
        str: each token — a simple name, a backtick-quoted name, or
        the ``.`` separator.

    Raises:
        ValueError: if part of ``path`` cannot be tokenized (raised
            while the generator is being consumed).
    """
    position = 0
    match = TOKENS_REGEX.match(path)
    while match is not None:
        yield match.group(match.lastgroup)
        position = match.end()
        match = TOKENS_REGEX.match(path, position)
    # Anything left over could not be matched by the token grammar.
    if position != len(path):
        raise ValueError(
            "Path {} not consumed, residue: {}".format(path, path[position:])
        )
|
||||
|
||||
|
||||
def split_field_path(path):
    """Split a field path into its component elements (without dots).

    Args:
        path (str): field path to be split.

    Returns:
        List(str): the path elements.

    Raises:
        ValueError: if the path does not match the elements-interspersed-
            with-dots pattern.
    """
    if not path:
        return []

    elements = []
    expect_dot = False

    # Tokens must strictly alternate: element, dot, element, dot, ...
    for token in _tokenize_field_path(path):
        is_dot = token == "."
        if is_dot != expect_dot:
            raise ValueError("Invalid path: {}".format(path))
        if not is_dot:
            elements.append(token)
        expect_dot = not expect_dot

    # The path must end on an element (not a trailing dot) and be non-empty.
    if not expect_dot or not elements:
        raise ValueError("Invalid path: {}".format(path))

    return elements
|
||||
|
||||
|
||||
def parse_field_path(api_repr):
    """Parse a **field path** string into a list of nested field names.

    See :func:`field_path` for more on **field paths**.

    Args:
        api_repr (str):
            The unique Firestore api representation which consists of
            either simple or UTF-8 field names. It cannot exceed
            1500 bytes, and cannot be empty. Simple field names match
            ``'^[_a-zA-Z][_a-zA-Z0-9]*$'``. All other field names are
            escaped by surrounding them with backticks.

    Returns:
        List[str, ...]: The list of field names in the field path.
    """
    # code dredged back up from
    # https://github.com/googleapis/google-cloud-python/pull/5109/files
    names = []
    for element in split_field_path(api_repr):
        if element.startswith(_BACKTICK) and element.endswith(_BACKTICK):
            # Quoted (non-simple) element: strip the surrounding
            # backticks and undo the escaping.
            element = (
                element[1:-1]
                .replace(_ESCAPED_BACKTICK, _BACKTICK)
                .replace(_ESCAPED_BACKSLASH, _BACKSLASH)
            )
        names.append(element)
    return names
|
||||
|
||||
|
||||
def render_field_path(field_names):
    """Create a **field path** from a list of nested field names.

    A **field path** is a ``.``-delimited concatenation of the field
    names, used to represent a nested field. For example, in the data

    .. code-block: python

        data = {
            'aa': {
                'bb': {
                    'cc': 10,
                },
            },
        }

    the field path ``'aa.bb.cc'`` represents the data stored in
    ``data['aa']['bb']['cc']``.

    Args:
        field_names (Iterable[str, ...]): The list of field names.

    Returns:
        str: The ``.``-delimited field path.
    """
    rendered = []

    for name in field_names:
        match = _SIMPLE_FIELD_NAME.match(name)
        if match and match.group(0) == name:
            # Identifier-like names need no quoting.
            rendered.append(name)
        else:
            # Escape backslashes before backticks so escape characters
            # added for backticks are not themselves re-escaped.
            escaped = name.replace(_BACKSLASH, _ESCAPED_BACKSLASH)
            escaped = escaped.replace(_BACKTICK, _ESCAPED_BACKTICK)
            rendered.append(_BACKTICK + escaped + _BACKTICK)

    return _FIELD_PATH_DELIMITER.join(rendered)
|
||||
|
||||
|
||||
get_field_path = render_field_path  # backward-compatible alias for older releases
|
||||
|
||||
|
||||
def get_nested_value(field_path, data):
    """Look up a (potentially nested) value in a dictionary by field path.

    A **field path** (``.``-delimited list of field names, e.g.
    ``'top1.middle2.bottom3'``) addresses nested data: that path returns
    ``data['top1']['middle2']['bottom3']``. See
    :meth:`~google.cloud.firestore_v1.client.Client.field_path` for
    more information on **field paths**.

    Args:
        field_path (str): A field path (``.``-delimited list of
            field names).
        data (Dict[str, Any]): The (possibly nested) data.

    Returns:
        Any: The value stored for the ``field_path``.

    Raises:
        KeyError: If the ``field_path`` does not match nested data.
    """
    names = parse_field_path(field_path)

    current = data
    for depth, name in enumerate(names):
        if not isinstance(current, collections_abc.Mapping):
            # Walked into a non-dict before exhausting the path.
            prefix = render_field_path(names[:depth])
            raise KeyError(_FIELD_PATH_WRONG_TYPE.format(prefix, name))
        if name not in current:
            if depth == 0:
                raise KeyError(_FIELD_PATH_MISSING_TOP.format(name))
            prefix = render_field_path(names[:depth])
            raise KeyError(_FIELD_PATH_MISSING_KEY.format(name, prefix))
        current = current[name]

    return current
|
||||
|
||||
|
||||
class FieldPath(object):
    """Field Path object for client use.

    A field path is a sequence of element keys, separated by periods.
    Each element key can be either a simple identifier, or a full
    unicode string.

    In the string representation of a field path, non-identifier
    elements must be quoted using backticks, with internal backticks
    and backslashes escaped with a backslash.

    Args:
        parts: (one or more strings)
            Indicating path of the key to be used.
    """

    def __init__(self, *parts):
        if any(
            not isinstance(part, six.string_types) or not part for part in parts
        ):
            raise ValueError("One or more components is not a string or is empty.")
        self.parts = tuple(parts)

    @classmethod
    def from_api_repr(cls, api_repr):
        """Factory: create a FieldPath from the string formatted per the API.

        Args:
            api_repr (str): a string path, with non-identifier elements
                quoted. It cannot exceed 1500 characters, and cannot be
                empty.

        Returns:
            (:class:`FieldPath`) An instance parsed from ``api_repr``.

        Raises:
            ValueError if the parsing fails
        """
        stripped = api_repr.strip()
        if not stripped:
            raise ValueError("Field path API representation cannot be empty.")
        return cls(*parse_field_path(stripped))

    @classmethod
    def from_string(cls, path_string):
        """Factory: create a FieldPath from a unicode string representation.

        This method splits on the character `.` and disallows the
        characters `~*/[]`. To create a FieldPath whose components have
        those characters, call the constructor.

        Args:
            path_string (str): A unicode string which cannot contain
                `~*/[]` characters, cannot exceed 1500 bytes, and cannot
                be empty.

        Returns:
            (:class:`FieldPath`) An instance parsed from ``path_string``.
        """
        try:
            return cls.from_api_repr(path_string)
        except ValueError:
            # Fall back to a plain dot-split, validating each element.
            elements = path_string.split(".")
            for element in elements:
                if not element:
                    raise ValueError("Empty element")
                if _LEADING_ALPHA_INVALID.match(element):
                    raise ValueError(
                        "Non-alphanum char in element with leading alpha: {}".format(
                            element
                        )
                    )
            return FieldPath(*elements)

    def __repr__(self):
        quoted = ",".join("'" + part + "'" for part in self.parts)
        return "FieldPath({})".format(quoted)

    def __hash__(self):
        return hash(self.to_api_repr())

    def __eq__(self, other):
        if not isinstance(other, FieldPath):
            return NotImplemented
        return self.parts == other.parts

    def __lt__(self, other):
        if not isinstance(other, FieldPath):
            return NotImplemented
        return self.parts < other.parts

    def __add__(self, other):
        """Adds `other` field path to end of this field path.

        Args:
            other (~google.cloud.firestore_v1._helpers.FieldPath, str):
                The field path to add to the end of this `FieldPath`.
        """
        if isinstance(other, six.string_types):
            other = FieldPath.from_string(other)
        elif not isinstance(other, FieldPath):
            return NotImplemented
        return FieldPath(*(self.parts + other.parts))

    def to_api_repr(self):
        """Render a quoted string representation of the FieldPath

        Returns:
            (str) Quoted string representation of the path stored
            within this FieldPath.
        """
        return render_field_path(self.parts)

    def eq_or_parent(self, other):
        """Check whether ``other`` is an ancestor.

        Returns:
            (bool) True IFF ``other`` is an ancestor or equal to
            ``self``, else False.
        """
        # Equivalent to comparing the first min(len) elements of each.
        shorter = min(len(self.parts), len(other.parts))
        return self.parts[:shorter] == other.parts[:shorter]

    def lineage(self):
        """Return field paths for all parents.

        Returns: Set[:class:`FieldPath`]
        """
        return {
            FieldPath(*self.parts[:index])
            for index in six.moves.range(1, len(self.parts))
        }

    @staticmethod
    def document_id():
        """A special FieldPath value to refer to the ID of a document. It can be used
        in queries to sort or filter by the document ID.

        Returns: A special sentinel value to refer to the ID of a document.
        """
        return "__name__"
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
180
venv/Lib/site-packages/google/cloud/firestore_v1/gapic/enums.py
Normal file
180
venv/Lib/site-packages/google/cloud/firestore_v1/gapic/enums.py
Normal file
|
@ -0,0 +1,180 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2020 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Wrappers for protocol buffer enum types."""
|
||||
|
||||
import enum
|
||||
|
||||
|
||||
class NullValue(enum.IntEnum):
    """Singleton enumeration representing the null value for the
    ``Value`` type union.

    The JSON representation for ``NullValue`` is JSON ``null``.

    Attributes:
        NULL_VALUE (int): Null value.
    """

    NULL_VALUE = 0
|
||||
|
||||
|
||||
class DocumentTransform(object):
    class FieldTransform(object):
        class ServerValue(enum.IntEnum):
            """A value computed by the server rather than supplied by the
            client.

            Attributes:
                SERVER_VALUE_UNSPECIFIED (int): Unspecified. This value must
                    not be used.
                REQUEST_TIME (int): The time at which the server processed
                    the request, with millisecond precision.
            """

            SERVER_VALUE_UNSPECIFIED = 0
            REQUEST_TIME = 1
|
||||
|
||||
|
||||
class StructuredQuery(object):
    class Direction(enum.IntEnum):
        """A sort direction for an ``order_by`` clause.

        Attributes:
            DIRECTION_UNSPECIFIED (int): Unspecified.
            ASCENDING (int): Ascending.
            DESCENDING (int): Descending.
        """

        DIRECTION_UNSPECIFIED = 0
        ASCENDING = 1
        DESCENDING = 2

    class CompositeFilter(object):
        class Operator(enum.IntEnum):
            """Operator combining multiple filters.

            Attributes:
                OPERATOR_UNSPECIFIED (int): Unspecified. This value must
                    not be used.
                AND (int): The results are required to satisfy each of the
                    combined filters.
            """

            OPERATOR_UNSPECIFIED = 0
            AND = 1

    class FieldFilter(object):
        class Operator(enum.IntEnum):
            """Comparison operator for a single-field filter.

            The inequality operators (``LESS_THAN``,
            ``LESS_THAN_OR_EQUAL``, ``GREATER_THAN``,
            ``GREATER_THAN_OR_EQUAL``) require that ``field`` come first
            in ``order_by``. ``IN`` and ``ARRAY_CONTAINS_ANY`` require
            that ``value`` be a non-empty ``ArrayValue`` with at most 10
            values, and no other ``IN``, ``ARRAY_CONTAINS_ANY``, or
            ``NOT_IN`` in the query.

            Attributes:
                OPERATOR_UNSPECIFIED (int): Unspecified. This value must
                    not be used.
                LESS_THAN (int): The given ``field`` is less than the
                    given ``value``.
                LESS_THAN_OR_EQUAL (int): The given ``field`` is less
                    than or equal to the given ``value``.
                GREATER_THAN (int): The given ``field`` is greater than
                    the given ``value``.
                GREATER_THAN_OR_EQUAL (int): The given ``field`` is
                    greater than or equal to the given ``value``.
                EQUAL (int): The given ``field`` is equal to the given
                    ``value``.
                ARRAY_CONTAINS (int): The given ``field`` is an array
                    that contains the given ``value``.
                IN (int): The given ``field`` is equal to at least one
                    value in the given array.
                ARRAY_CONTAINS_ANY (int): The given ``field`` is an array
                    that contains any of the values in the given array.
            """

            OPERATOR_UNSPECIFIED = 0
            LESS_THAN = 1
            LESS_THAN_OR_EQUAL = 2
            GREATER_THAN = 3
            GREATER_THAN_OR_EQUAL = 4
            EQUAL = 5
            # NOTE: wire value 6 is intentionally absent here, matching
            # the generated proto enum.
            ARRAY_CONTAINS = 7
            IN = 8
            ARRAY_CONTAINS_ANY = 9

    class UnaryFilter(object):
        class Operator(enum.IntEnum):
            """A unary (single-operand) filter operator.

            Attributes:
                OPERATOR_UNSPECIFIED (int): Unspecified. This value must
                    not be used.
                IS_NAN (int): The given ``field`` is equal to ``NaN``.
                IS_NULL (int): The given ``field`` is equal to ``NULL``.
            """

            OPERATOR_UNSPECIFIED = 0
            # NOTE: wire value 1 is intentionally absent here, matching
            # the generated proto enum.
            IS_NAN = 2
            IS_NULL = 3
|
||||
|
||||
|
||||
class TargetChange(object):
    class TargetChangeType(enum.IntEnum):
        """The type of change reported for a watch target.

        Attributes:
            NO_CHANGE (int): No change has occurred. Used only to send an
                updated ``resume_token``.
            ADD (int): The targets have been added.
            REMOVE (int): The targets have been removed.
            CURRENT (int): The targets reflect all changes committed
                before the targets were added to the stream.

                This will be sent after or with a ``read_time`` that is
                greater than or equal to the time at which the targets
                were added.

                Listeners can wait for this change if read-after-write
                semantics are desired.
            RESET (int): The targets have been reset, and a new initial
                state for the targets will be returned in subsequent
                changes.

                After the initial state is complete, ``CURRENT`` will be
                returned even if the target was previously indicated to
                be ``CURRENT``.
        """

        NO_CHANGE = 0
        ADD = 1
        REMOVE = 2
        CURRENT = 3
        RESET = 4
|
File diff suppressed because it is too large
Load diff
|
@ -0,0 +1,109 @@
|
|||
config = {
|
||||
"interfaces": {
|
||||
"google.firestore.v1.Firestore": {
|
||||
"retry_codes": {
|
||||
"idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"],
|
||||
"aborted_unavailable": ["ABORTED", "UNAVAILABLE"],
|
||||
"non_idempotent": [],
|
||||
"idempotent2": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
|
||||
},
|
||||
"retry_params": {
|
||||
"default": {
|
||||
"initial_retry_delay_millis": 100,
|
||||
"retry_delay_multiplier": 1.3,
|
||||
"max_retry_delay_millis": 60000,
|
||||
"initial_rpc_timeout_millis": 60000,
|
||||
"rpc_timeout_multiplier": 1.0,
|
||||
"max_rpc_timeout_millis": 60000,
|
||||
"total_timeout_millis": 600000,
|
||||
},
|
||||
"streaming": {
|
||||
"initial_retry_delay_millis": 100,
|
||||
"retry_delay_multiplier": 1.3,
|
||||
"max_retry_delay_millis": 60000,
|
||||
"initial_rpc_timeout_millis": 60000,
|
||||
"rpc_timeout_multiplier": 1.0,
|
||||
"max_rpc_timeout_millis": 60000,
|
||||
"total_timeout_millis": 600000,
|
||||
},
|
||||
},
|
||||
"methods": {
|
||||
"GetDocument": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent2",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"ListDocuments": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent2",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"CreateDocument": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "non_idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"UpdateDocument": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "non_idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"DeleteDocument": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"BatchGetDocuments": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent",
|
||||
"retry_params_name": "streaming",
|
||||
},
|
||||
"BatchWrite": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "aborted_unavailable",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"BeginTransaction": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"Commit": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "non_idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"Rollback": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"RunQuery": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent",
|
||||
"retry_params_name": "streaming",
|
||||
},
|
||||
"Write": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "non_idempotent",
|
||||
"retry_params_name": "streaming",
|
||||
},
|
||||
"Listen": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent",
|
||||
"retry_params_name": "streaming",
|
||||
},
|
||||
"ListCollectionIds": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
"PartitionQuery": {
|
||||
"timeout_millis": 60000,
|
||||
"retry_codes_name": "non_idempotent",
|
||||
"retry_params_name": "default",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,319 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright 2020 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
import google.api_core.grpc_helpers
|
||||
|
||||
from google.cloud.firestore_v1.proto import firestore_pb2_grpc
|
||||
|
||||
|
||||
class FirestoreGrpcTransport(object):
|
||||
"""gRPC transport class providing stubs for
|
||||
google.firestore.v1 Firestore API.
|
||||
|
||||
The transport provides access to the raw gRPC stubs,
|
||||
which can be used to take advantage of advanced
|
||||
features of gRPC.
|
||||
"""
|
||||
|
||||
# The scopes needed to make gRPC calls to all of the methods defined
|
||||
# in this service.
|
||||
_OAUTH_SCOPES = (
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/datastore",
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self, channel=None, credentials=None, address="firestore.googleapis.com:443"
|
||||
):
|
||||
"""Instantiate the transport class.
|
||||
|
||||
Args:
|
||||
channel (grpc.Channel): A ``Channel`` instance through
|
||||
which to make calls. This argument is mutually exclusive
|
||||
with ``credentials``; providing both will raise an exception.
|
||||
credentials (google.auth.credentials.Credentials): The
|
||||
authorization credentials to attach to requests. These
|
||||
credentials identify this application to the service. If none
|
||||
are specified, the client will attempt to ascertain the
|
||||
credentials from the environment.
|
||||
address (str): The address where the service is hosted.
|
||||
"""
|
||||
# If both `channel` and `credentials` are specified, raise an
|
||||
# exception (channels come with credentials baked in already).
|
||||
if channel is not None and credentials is not None:
|
||||
raise ValueError(
|
||||
"The `channel` and `credentials` arguments are mutually " "exclusive.",
|
||||
)
|
||||
|
||||
# Create the channel.
|
||||
if channel is None:
|
||||
channel = self.create_channel(
|
||||
address=address,
|
||||
credentials=credentials,
|
||||
options={
|
||||
"grpc.max_send_message_length": -1,
|
||||
"grpc.max_receive_message_length": -1,
|
||||
}.items(),
|
||||
)
|
||||
|
||||
self._channel = channel
|
||||
|
||||
# gRPC uses objects called "stubs" that are bound to the
|
||||
# channel and provide a basic method for each RPC.
|
||||
self._stubs = {
|
||||
"firestore_stub": firestore_pb2_grpc.FirestoreStub(channel),
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def create_channel(
|
||||
cls, address="firestore.googleapis.com:443", credentials=None, **kwargs
|
||||
):
|
||||
"""Create and return a gRPC channel object.
|
||||
|
||||
Args:
|
||||
address (str): The host for the channel to use.
|
||||
credentials (~.Credentials): The
|
||||
authorization credentials to attach to requests. These
|
||||
credentials identify this application to the service. If
|
||||
none are specified, the client will attempt to ascertain
|
||||
the credentials from the environment.
|
||||
kwargs (dict): Keyword arguments, which are passed to the
|
||||
channel creation.
|
||||
|
||||
Returns:
|
||||
grpc.Channel: A gRPC channel object.
|
||||
"""
|
||||
return google.api_core.grpc_helpers.create_channel(
|
||||
address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
|
||||
)
|
||||
|
||||
@property
|
||||
def channel(self):
|
||||
"""The gRPC channel used by the transport.
|
||||
|
||||
Returns:
|
||||
grpc.Channel: A gRPC channel object.
|
||||
"""
|
||||
return self._channel
|
||||
|
||||
@property
|
||||
def get_document(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.get_document`.
|
||||
|
||||
Gets a single document.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].GetDocument
|
||||
|
||||
@property
|
||||
def list_documents(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.list_documents`.
|
||||
|
||||
Lists documents.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].ListDocuments
|
||||
|
||||
@property
|
||||
def create_document(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.create_document`.
|
||||
|
||||
Creates a new document.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].CreateDocument
|
||||
|
||||
@property
|
||||
def update_document(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.update_document`.
|
||||
|
||||
Updates or inserts a document.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].UpdateDocument
|
||||
|
||||
@property
|
||||
def delete_document(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.delete_document`.
|
||||
|
||||
Deletes a document.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].DeleteDocument
|
||||
|
||||
@property
|
||||
def batch_get_documents(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.batch_get_documents`.
|
||||
|
||||
Gets multiple documents.
|
||||
|
||||
Documents returned by this method are not guaranteed to be returned in the
|
||||
same order that they were requested.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].BatchGetDocuments
|
||||
|
||||
@property
|
||||
def batch_write(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.batch_write`.
|
||||
|
||||
Applies a batch of write operations.
|
||||
|
||||
The BatchWrite method does not apply the write operations atomically and
|
||||
can apply them out of order. Method does not allow more than one write
|
||||
per document. Each write succeeds or fails independently. See the
|
||||
``BatchWriteResponse`` for the success status of each write.
|
||||
|
||||
If you require an atomically applied set of writes, use ``Commit``
|
||||
instead.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].BatchWrite
|
||||
|
||||
@property
|
||||
def begin_transaction(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.begin_transaction`.
|
||||
|
||||
Starts a new transaction.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].BeginTransaction
|
||||
|
||||
@property
|
||||
def commit(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.commit`.
|
||||
|
||||
Commits a transaction, while optionally updating documents.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].Commit
|
||||
|
||||
@property
|
||||
def rollback(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.rollback`.
|
||||
|
||||
Rolls back a transaction.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].Rollback
|
||||
|
||||
@property
|
||||
def run_query(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.run_query`.
|
||||
|
||||
Runs a query.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].RunQuery
|
||||
|
||||
@property
|
||||
def write(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.write`.
|
||||
|
||||
Streams batches of document updates and deletes, in order.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].Write
|
||||
|
||||
@property
|
||||
def listen(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.listen`.
|
||||
|
||||
Listens to changes.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].Listen
|
||||
|
||||
@property
|
||||
def list_collection_ids(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.list_collection_ids`.
|
||||
|
||||
Lists all the collection IDs underneath a document.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].ListCollectionIds
|
||||
|
||||
@property
|
||||
def partition_query(self):
|
||||
"""Return the gRPC stub for :meth:`FirestoreClient.partition_query`.
|
||||
|
||||
Partitions a query by returning partition cursors that can be used to run
|
||||
the query in parallel. The returned partition cursors are split points that
|
||||
can be used by RunQuery as starting/end points for the query results.
|
||||
|
||||
Returns:
|
||||
Callable: A callable which accepts the appropriate
|
||||
deserialized request object and returns a
|
||||
deserialized response object.
|
||||
"""
|
||||
return self._stubs["firestore_stub"].PartitionQuery
|
207
venv/Lib/site-packages/google/cloud/firestore_v1/order.py
Normal file
207
venv/Lib/site-packages/google/cloud/firestore_v1/order.py
Normal file
|
@ -0,0 +1,207 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from enum import Enum
|
||||
from google.cloud.firestore_v1._helpers import decode_value
|
||||
import math
|
||||
|
||||
|
||||
class TypeOrder(Enum):
|
||||
# NOTE: This order is defined by the backend and cannot be changed.
|
||||
NULL = 0
|
||||
BOOLEAN = 1
|
||||
NUMBER = 2
|
||||
TIMESTAMP = 3
|
||||
STRING = 4
|
||||
BLOB = 5
|
||||
REF = 6
|
||||
GEO_POINT = 7
|
||||
ARRAY = 8
|
||||
OBJECT = 9
|
||||
|
||||
@staticmethod
|
||||
def from_value(value):
|
||||
v = value.WhichOneof("value_type")
|
||||
|
||||
lut = {
|
||||
"null_value": TypeOrder.NULL,
|
||||
"boolean_value": TypeOrder.BOOLEAN,
|
||||
"integer_value": TypeOrder.NUMBER,
|
||||
"double_value": TypeOrder.NUMBER,
|
||||
"timestamp_value": TypeOrder.TIMESTAMP,
|
||||
"string_value": TypeOrder.STRING,
|
||||
"bytes_value": TypeOrder.BLOB,
|
||||
"reference_value": TypeOrder.REF,
|
||||
"geo_point_value": TypeOrder.GEO_POINT,
|
||||
"array_value": TypeOrder.ARRAY,
|
||||
"map_value": TypeOrder.OBJECT,
|
||||
}
|
||||
|
||||
if v not in lut:
|
||||
raise ValueError("Could not detect value type for " + v)
|
||||
return lut[v]
|
||||
|
||||
|
||||
class Order(object):
|
||||
"""
|
||||
Order implements the ordering semantics of the backend.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def compare(cls, left, right):
|
||||
"""
|
||||
Main comparison function for all Firestore types.
|
||||
@return -1 is left < right, 0 if left == right, otherwise 1
|
||||
"""
|
||||
# First compare the types.
|
||||
leftType = TypeOrder.from_value(left).value
|
||||
rightType = TypeOrder.from_value(right).value
|
||||
|
||||
if leftType != rightType:
|
||||
if leftType < rightType:
|
||||
return -1
|
||||
return 1
|
||||
|
||||
value_type = left.WhichOneof("value_type")
|
||||
|
||||
if value_type == "null_value":
|
||||
return 0 # nulls are all equal
|
||||
elif value_type == "boolean_value":
|
||||
return cls._compare_to(left.boolean_value, right.boolean_value)
|
||||
elif value_type == "integer_value":
|
||||
return cls.compare_numbers(left, right)
|
||||
elif value_type == "double_value":
|
||||
return cls.compare_numbers(left, right)
|
||||
elif value_type == "timestamp_value":
|
||||
return cls.compare_timestamps(left, right)
|
||||
elif value_type == "string_value":
|
||||
return cls._compare_to(left.string_value, right.string_value)
|
||||
elif value_type == "bytes_value":
|
||||
return cls.compare_blobs(left, right)
|
||||
elif value_type == "reference_value":
|
||||
return cls.compare_resource_paths(left, right)
|
||||
elif value_type == "geo_point_value":
|
||||
return cls.compare_geo_points(left, right)
|
||||
elif value_type == "array_value":
|
||||
return cls.compare_arrays(left, right)
|
||||
elif value_type == "map_value":
|
||||
return cls.compare_objects(left, right)
|
||||
else:
|
||||
raise ValueError("Unknown ``value_type``", str(value_type))
|
||||
|
||||
@staticmethod
|
||||
def compare_blobs(left, right):
|
||||
left_bytes = left.bytes_value
|
||||
right_bytes = right.bytes_value
|
||||
|
||||
return Order._compare_to(left_bytes, right_bytes)
|
||||
|
||||
@staticmethod
|
||||
def compare_timestamps(left, right):
|
||||
left = left.timestamp_value
|
||||
right = right.timestamp_value
|
||||
|
||||
seconds = Order._compare_to(left.seconds or 0, right.seconds or 0)
|
||||
if seconds != 0:
|
||||
return seconds
|
||||
|
||||
return Order._compare_to(left.nanos or 0, right.nanos or 0)
|
||||
|
||||
@staticmethod
|
||||
def compare_geo_points(left, right):
|
||||
left_value = decode_value(left, None)
|
||||
right_value = decode_value(right, None)
|
||||
cmp = (left_value.latitude > right_value.latitude) - (
|
||||
left_value.latitude < right_value.latitude
|
||||
)
|
||||
|
||||
if cmp != 0:
|
||||
return cmp
|
||||
return (left_value.longitude > right_value.longitude) - (
|
||||
left_value.longitude < right_value.longitude
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def compare_resource_paths(left, right):
|
||||
left = left.reference_value
|
||||
right = right.reference_value
|
||||
|
||||
left_segments = left.split("/")
|
||||
right_segments = right.split("/")
|
||||
shorter = min(len(left_segments), len(right_segments))
|
||||
# compare segments
|
||||
for i in range(shorter):
|
||||
if left_segments[i] < right_segments[i]:
|
||||
return -1
|
||||
if left_segments[i] > right_segments[i]:
|
||||
return 1
|
||||
|
||||
left_length = len(left)
|
||||
right_length = len(right)
|
||||
return (left_length > right_length) - (left_length < right_length)
|
||||
|
||||
@staticmethod
|
||||
def compare_arrays(left, right):
|
||||
l_values = left.array_value.values
|
||||
r_values = right.array_value.values
|
||||
|
||||
length = min(len(l_values), len(r_values))
|
||||
for i in range(length):
|
||||
cmp = Order.compare(l_values[i], r_values[i])
|
||||
if cmp != 0:
|
||||
return cmp
|
||||
|
||||
return Order._compare_to(len(l_values), len(r_values))
|
||||
|
||||
@staticmethod
|
||||
def compare_objects(left, right):
|
||||
left_fields = left.map_value.fields
|
||||
right_fields = right.map_value.fields
|
||||
|
||||
for left_key, right_key in zip(sorted(left_fields), sorted(right_fields)):
|
||||
keyCompare = Order._compare_to(left_key, right_key)
|
||||
if keyCompare != 0:
|
||||
return keyCompare
|
||||
|
||||
value_compare = Order.compare(
|
||||
left_fields[left_key], right_fields[right_key]
|
||||
)
|
||||
if value_compare != 0:
|
||||
return value_compare
|
||||
|
||||
return Order._compare_to(len(left_fields), len(right_fields))
|
||||
|
||||
@staticmethod
|
||||
def compare_numbers(left, right):
|
||||
left_value = decode_value(left, None)
|
||||
right_value = decode_value(right, None)
|
||||
return Order.compare_doubles(left_value, right_value)
|
||||
|
||||
@staticmethod
|
||||
def compare_doubles(left, right):
|
||||
if math.isnan(left):
|
||||
if math.isnan(right):
|
||||
return 0
|
||||
return -1
|
||||
if math.isnan(right):
|
||||
return 1
|
||||
|
||||
return Order._compare_to(left, right)
|
||||
|
||||
@staticmethod
|
||||
def _compare_to(left, right):
|
||||
# We can't just use cmp(left, right) because cmp doesn't exist
|
||||
# in Python 3, so this is an equivalent suggested by
|
||||
# https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
|
||||
return (left > right) - (left < right)
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,83 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/api/annotations.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "CommonProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// A set of field paths on a document.
|
||||
// Used to restrict a get or update operation on a document to a subset of its
|
||||
// fields.
|
||||
// This is different from standard field masks, as this is always scoped to a
|
||||
// [Document][google.firestore.v1.Document], and takes in account the dynamic nature of [Value][google.firestore.v1.Value].
|
||||
message DocumentMask {
|
||||
// The list of field paths in the mask. See [Document.fields][google.firestore.v1.Document.fields] for a field
|
||||
// path syntax reference.
|
||||
repeated string field_paths = 1;
|
||||
}
|
||||
|
||||
// A precondition on a document, used for conditional operations.
|
||||
message Precondition {
|
||||
// The type of precondition.
|
||||
oneof condition_type {
|
||||
// When set to `true`, the target document must exist.
|
||||
// When set to `false`, the target document must not exist.
|
||||
bool exists = 1;
|
||||
|
||||
// When set, the target document must exist and have been last updated at
|
||||
// that time.
|
||||
google.protobuf.Timestamp update_time = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Options for creating a new transaction.
|
||||
message TransactionOptions {
|
||||
// Options for a transaction that can be used to read and write documents.
|
||||
message ReadWrite {
|
||||
// An optional transaction to retry.
|
||||
bytes retry_transaction = 1;
|
||||
}
|
||||
|
||||
// Options for a transaction that can only be used to read documents.
|
||||
message ReadOnly {
|
||||
// The consistency mode for this transaction. If not set, defaults to strong
|
||||
// consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads documents at the given time.
|
||||
// This may not be older than 60 seconds.
|
||||
google.protobuf.Timestamp read_time = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// The mode of the transaction.
|
||||
oneof mode {
|
||||
// The transaction can only be used for read operations.
|
||||
ReadOnly read_only = 2;
|
||||
|
||||
// The transaction can be used for both read and write operations.
|
||||
ReadWrite read_write = 3;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,456 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: google/cloud/firestore_v1/proto/common.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
|
||||
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor.FileDescriptor(
|
||||
name="google/cloud/firestore_v1/proto/common.proto",
|
||||
package="google.firestore.v1",
|
||||
syntax="proto3",
|
||||
serialized_options=b"\n\027com.google.firestore.v1B\013CommonProtoP\001Z<google.golang.org/genproto/googleapis/firestore/v1;firestore\242\002\004GCFS\252\002\031Google.Cloud.Firestore.V1\312\002\031Google\\Cloud\\Firestore\\V1\352\002\034Google::Cloud::Firestore::V1",
|
||||
create_key=_descriptor._internal_create_key,
|
||||
serialized_pb=b'\n,google/cloud/firestore_v1/proto/common.proto\x12\x13google.firestore.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"#\n\x0c\x44ocumentMask\x12\x13\n\x0b\x66ield_paths\x18\x01 \x03(\t"e\n\x0cPrecondition\x12\x10\n\x06\x65xists\x18\x01 \x01(\x08H\x00\x12\x31\n\x0bupdate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x10\n\x0e\x63ondition_type"\xa9\x02\n\x12TransactionOptions\x12\x45\n\tread_only\x18\x02 \x01(\x0b\x32\x30.google.firestore.v1.TransactionOptions.ReadOnlyH\x00\x12G\n\nread_write\x18\x03 \x01(\x0b\x32\x31.google.firestore.v1.TransactionOptions.ReadWriteH\x00\x1a&\n\tReadWrite\x12\x19\n\x11retry_transaction\x18\x01 \x01(\x0c\x1aS\n\x08ReadOnly\x12/\n\tread_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x42\x16\n\x14\x63onsistency_selectorB\x06\n\x04modeB\xc4\x01\n\x17\x63om.google.firestore.v1B\x0b\x43ommonProtoP\x01Z<google.golang.org/genproto/googleapis/firestore/v1;firestore\xa2\x02\x04GCFS\xaa\x02\x19Google.Cloud.Firestore.V1\xca\x02\x19Google\\Cloud\\Firestore\\V1\xea\x02\x1cGoogle::Cloud::Firestore::V1b\x06proto3',
|
||||
dependencies=[
|
||||
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
|
||||
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
_DOCUMENTMASK = _descriptor.Descriptor(
|
||||
name="DocumentMask",
|
||||
full_name="google.firestore.v1.DocumentMask",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="field_paths",
|
||||
full_name="google.firestore.v1.DocumentMask.field_paths",
|
||||
index=0,
|
||||
number=1,
|
||||
type=9,
|
||||
cpp_type=9,
|
||||
label=3,
|
||||
has_default_value=False,
|
||||
default_value=[],
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[],
|
||||
serialized_start=132,
|
||||
serialized_end=167,
|
||||
)
|
||||
|
||||
|
||||
_PRECONDITION = _descriptor.Descriptor(
|
||||
name="Precondition",
|
||||
full_name="google.firestore.v1.Precondition",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="exists",
|
||||
full_name="google.firestore.v1.Precondition.exists",
|
||||
index=0,
|
||||
number=1,
|
||||
type=8,
|
||||
cpp_type=7,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=False,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="update_time",
|
||||
full_name="google.firestore.v1.Precondition.update_time",
|
||||
index=1,
|
||||
number=2,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
_descriptor.OneofDescriptor(
|
||||
name="condition_type",
|
||||
full_name="google.firestore.v1.Precondition.condition_type",
|
||||
index=0,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[],
|
||||
),
|
||||
],
|
||||
serialized_start=169,
|
||||
serialized_end=270,
|
||||
)
|
||||
|
||||
|
||||
_TRANSACTIONOPTIONS_READWRITE = _descriptor.Descriptor(
|
||||
name="ReadWrite",
|
||||
full_name="google.firestore.v1.TransactionOptions.ReadWrite",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="retry_transaction",
|
||||
full_name="google.firestore.v1.TransactionOptions.ReadWrite.retry_transaction",
|
||||
index=0,
|
||||
number=1,
|
||||
type=12,
|
||||
cpp_type=9,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=b"",
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[],
|
||||
serialized_start=439,
|
||||
serialized_end=477,
|
||||
)
|
||||
|
||||
_TRANSACTIONOPTIONS_READONLY = _descriptor.Descriptor(
|
||||
name="ReadOnly",
|
||||
full_name="google.firestore.v1.TransactionOptions.ReadOnly",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="read_time",
|
||||
full_name="google.firestore.v1.TransactionOptions.ReadOnly.read_time",
|
||||
index=0,
|
||||
number=2,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
_descriptor.OneofDescriptor(
|
||||
name="consistency_selector",
|
||||
full_name="google.firestore.v1.TransactionOptions.ReadOnly.consistency_selector",
|
||||
index=0,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[],
|
||||
),
|
||||
],
|
||||
serialized_start=479,
|
||||
serialized_end=562,
|
||||
)
|
||||
|
||||
_TRANSACTIONOPTIONS = _descriptor.Descriptor(
|
||||
name="TransactionOptions",
|
||||
full_name="google.firestore.v1.TransactionOptions",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="read_only",
|
||||
full_name="google.firestore.v1.TransactionOptions.read_only",
|
||||
index=0,
|
||||
number=2,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="read_write",
|
||||
full_name="google.firestore.v1.TransactionOptions.read_write",
|
||||
index=1,
|
||||
number=3,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[_TRANSACTIONOPTIONS_READWRITE, _TRANSACTIONOPTIONS_READONLY,],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
_descriptor.OneofDescriptor(
|
||||
name="mode",
|
||||
full_name="google.firestore.v1.TransactionOptions.mode",
|
||||
index=0,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[],
|
||||
),
|
||||
],
|
||||
serialized_start=273,
|
||||
serialized_end=570,
|
||||
)
|
||||
|
||||
_PRECONDITION.fields_by_name[
|
||||
"update_time"
|
||||
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
|
||||
_PRECONDITION.oneofs_by_name["condition_type"].fields.append(
|
||||
_PRECONDITION.fields_by_name["exists"]
|
||||
)
|
||||
_PRECONDITION.fields_by_name["exists"].containing_oneof = _PRECONDITION.oneofs_by_name[
|
||||
"condition_type"
|
||||
]
|
||||
_PRECONDITION.oneofs_by_name["condition_type"].fields.append(
|
||||
_PRECONDITION.fields_by_name["update_time"]
|
||||
)
|
||||
_PRECONDITION.fields_by_name[
|
||||
"update_time"
|
||||
].containing_oneof = _PRECONDITION.oneofs_by_name["condition_type"]
|
||||
_TRANSACTIONOPTIONS_READWRITE.containing_type = _TRANSACTIONOPTIONS
|
||||
_TRANSACTIONOPTIONS_READONLY.fields_by_name[
|
||||
"read_time"
|
||||
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
|
||||
_TRANSACTIONOPTIONS_READONLY.containing_type = _TRANSACTIONOPTIONS
|
||||
_TRANSACTIONOPTIONS_READONLY.oneofs_by_name["consistency_selector"].fields.append(
|
||||
_TRANSACTIONOPTIONS_READONLY.fields_by_name["read_time"]
|
||||
)
|
||||
_TRANSACTIONOPTIONS_READONLY.fields_by_name[
|
||||
"read_time"
|
||||
].containing_oneof = _TRANSACTIONOPTIONS_READONLY.oneofs_by_name["consistency_selector"]
|
||||
_TRANSACTIONOPTIONS.fields_by_name[
|
||||
"read_only"
|
||||
].message_type = _TRANSACTIONOPTIONS_READONLY
|
||||
_TRANSACTIONOPTIONS.fields_by_name[
|
||||
"read_write"
|
||||
].message_type = _TRANSACTIONOPTIONS_READWRITE
|
||||
_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append(
|
||||
_TRANSACTIONOPTIONS.fields_by_name["read_only"]
|
||||
)
|
||||
_TRANSACTIONOPTIONS.fields_by_name[
|
||||
"read_only"
|
||||
].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"]
|
||||
_TRANSACTIONOPTIONS.oneofs_by_name["mode"].fields.append(
|
||||
_TRANSACTIONOPTIONS.fields_by_name["read_write"]
|
||||
)
|
||||
_TRANSACTIONOPTIONS.fields_by_name[
|
||||
"read_write"
|
||||
].containing_oneof = _TRANSACTIONOPTIONS.oneofs_by_name["mode"]
|
||||
DESCRIPTOR.message_types_by_name["DocumentMask"] = _DOCUMENTMASK
|
||||
DESCRIPTOR.message_types_by_name["Precondition"] = _PRECONDITION
|
||||
DESCRIPTOR.message_types_by_name["TransactionOptions"] = _TRANSACTIONOPTIONS
|
||||
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
||||
|
||||
DocumentMask = _reflection.GeneratedProtocolMessageType(
|
||||
"DocumentMask",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _DOCUMENTMASK,
|
||||
"__module__": "google.cloud.firestore_v1.proto.common_pb2",
|
||||
"__doc__": """A set of field paths on a document. Used to restrict a get or update
|
||||
operation on a document to a subset of its fields. This is different
|
||||
from standard field masks, as this is always scoped to a
|
||||
[Document][google.firestore.v1.Document], and takes in account the
|
||||
dynamic nature of [Value][google.firestore.v1.Value].
|
||||
|
||||
Attributes:
|
||||
field_paths:
|
||||
The list of field paths in the mask. See
|
||||
[Document.fields][google.firestore.v1.Document.fields] for a
|
||||
field path syntax reference.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.DocumentMask)
|
||||
},
|
||||
)
|
||||
_sym_db.RegisterMessage(DocumentMask)
|
||||
|
||||
Precondition = _reflection.GeneratedProtocolMessageType(
|
||||
"Precondition",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _PRECONDITION,
|
||||
"__module__": "google.cloud.firestore_v1.proto.common_pb2",
|
||||
"__doc__": """A precondition on a document, used for conditional operations.
|
||||
|
||||
Attributes:
|
||||
condition_type:
|
||||
The type of precondition.
|
||||
exists:
|
||||
When set to ``true``, the target document must exist. When set
|
||||
to ``false``, the target document must not exist.
|
||||
update_time:
|
||||
When set, the target document must exist and have been last
|
||||
updated at that time.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.Precondition)
|
||||
},
|
||||
)
|
||||
_sym_db.RegisterMessage(Precondition)
|
||||
|
||||
TransactionOptions = _reflection.GeneratedProtocolMessageType(
|
||||
"TransactionOptions",
|
||||
(_message.Message,),
|
||||
{
|
||||
"ReadWrite": _reflection.GeneratedProtocolMessageType(
|
||||
"ReadWrite",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _TRANSACTIONOPTIONS_READWRITE,
|
||||
"__module__": "google.cloud.firestore_v1.proto.common_pb2",
|
||||
"__doc__": """Options for a transaction that can be used to read and write
|
||||
documents.
|
||||
|
||||
Attributes:
|
||||
retry_transaction:
|
||||
An optional transaction to retry.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.TransactionOptions.ReadWrite)
|
||||
},
|
||||
),
|
||||
"ReadOnly": _reflection.GeneratedProtocolMessageType(
|
||||
"ReadOnly",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _TRANSACTIONOPTIONS_READONLY,
|
||||
"__module__": "google.cloud.firestore_v1.proto.common_pb2",
|
||||
"__doc__": """Options for a transaction that can only be used to read documents.
|
||||
|
||||
Attributes:
|
||||
consistency_selector:
|
||||
The consistency mode for this transaction. If not set,
|
||||
defaults to strong consistency.
|
||||
read_time:
|
||||
Reads documents at the given time. This may not be older than
|
||||
60 seconds.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.TransactionOptions.ReadOnly)
|
||||
},
|
||||
),
|
||||
"DESCRIPTOR": _TRANSACTIONOPTIONS,
|
||||
"__module__": "google.cloud.firestore_v1.proto.common_pb2",
|
||||
"__doc__": """Options for creating a new transaction.
|
||||
|
||||
Attributes:
|
||||
mode:
|
||||
The mode of the transaction.
|
||||
read_only:
|
||||
The transaction can only be used for read operations.
|
||||
read_write:
|
||||
The transaction can be used for both read and write
|
||||
operations.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.TransactionOptions)
|
||||
},
|
||||
)
|
||||
_sym_db.RegisterMessage(TransactionOptions)
|
||||
_sym_db.RegisterMessage(TransactionOptions.ReadWrite)
|
||||
_sym_db.RegisterMessage(TransactionOptions.ReadOnly)
|
||||
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
# @@protoc_insertion_point(module_scope)
|
|
@ -0,0 +1,3 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
|
@ -0,0 +1,150 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
import "google/protobuf/struct.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/type/latlng.proto";
|
||||
import "google/api/annotations.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "DocumentProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// A Firestore document.
|
||||
//
|
||||
// Must not exceed 1 MiB - 4 bytes.
|
||||
message Document {
|
||||
// The resource name of the document, for example
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string name = 1;
|
||||
|
||||
// The document's fields.
|
||||
//
|
||||
// The map keys represent field names.
|
||||
//
|
||||
// A simple field name contains only characters `a` to `z`, `A` to `Z`,
|
||||
// `0` to `9`, or `_`, and must not start with `0` to `9`. For example,
|
||||
// `foo_bar_17`.
|
||||
//
|
||||
// Field names matching the regular expression `__.*__` are reserved. Reserved
|
||||
// field names are forbidden except in certain documented contexts. The map
|
||||
// keys, represented as UTF-8, must not exceed 1,500 bytes and cannot be
|
||||
// empty.
|
||||
//
|
||||
// Field paths may be used in other contexts to refer to structured fields
|
||||
// defined here. For `map_value`, the field path is represented by the simple
|
||||
// or quoted field names of the containing fields, delimited by `.`. For
|
||||
// example, the structured field
|
||||
// `"foo" : { map_value: { "x&y" : { string_value: "hello" }}}` would be
|
||||
// represented by the field path `foo.x&y`.
|
||||
//
|
||||
// Within a field path, a quoted field name starts and ends with `` ` `` and
|
||||
// may contain any character. Some characters, including `` ` ``, must be
|
||||
// escaped using a `\`. For example, `` `x&y` `` represents `x&y` and
|
||||
// `` `bak\`tik` `` represents `` bak`tik ``.
|
||||
map<string, Value> fields = 2;
|
||||
|
||||
// Output only. The time at which the document was created.
|
||||
//
|
||||
// This value increases monotonically when a document is deleted then
|
||||
// recreated. It can also be compared to values from other documents and
|
||||
// the `read_time` of a query.
|
||||
google.protobuf.Timestamp create_time = 3;
|
||||
|
||||
// Output only. The time at which the document was last changed.
|
||||
//
|
||||
// This value is initially set to the `create_time` then increases
|
||||
// monotonically with each change to the document. It can also be
|
||||
// compared to values from other documents and the `read_time` of a query.
|
||||
google.protobuf.Timestamp update_time = 4;
|
||||
}
|
||||
|
||||
// A message that can hold any of the supported value types.
|
||||
message Value {
|
||||
// Must have a value set.
|
||||
oneof value_type {
|
||||
// A null value.
|
||||
google.protobuf.NullValue null_value = 11;
|
||||
|
||||
// A boolean value.
|
||||
bool boolean_value = 1;
|
||||
|
||||
// An integer value.
|
||||
int64 integer_value = 2;
|
||||
|
||||
// A double value.
|
||||
double double_value = 3;
|
||||
|
||||
// A timestamp value.
|
||||
//
|
||||
// Precise only to microseconds. When stored, any additional precision is
|
||||
// rounded down.
|
||||
google.protobuf.Timestamp timestamp_value = 10;
|
||||
|
||||
// A string value.
|
||||
//
|
||||
// The string, represented as UTF-8, must not exceed 1 MiB - 89 bytes.
|
||||
// Only the first 1,500 bytes of the UTF-8 representation are considered by
|
||||
// queries.
|
||||
string string_value = 17;
|
||||
|
||||
// A bytes value.
|
||||
//
|
||||
// Must not exceed 1 MiB - 89 bytes.
|
||||
// Only the first 1,500 bytes are considered by queries.
|
||||
bytes bytes_value = 18;
|
||||
|
||||
// A reference to a document. For example:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string reference_value = 5;
|
||||
|
||||
// A geo point value representing a point on the surface of Earth.
|
||||
google.type.LatLng geo_point_value = 8;
|
||||
|
||||
// An array value.
|
||||
//
|
||||
// Cannot directly contain another array value, though can contain an
|
||||
// map which contains another array.
|
||||
ArrayValue array_value = 9;
|
||||
|
||||
// A map value.
|
||||
MapValue map_value = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// An array value.
|
||||
message ArrayValue {
|
||||
// Values in the array.
|
||||
repeated Value values = 1;
|
||||
}
|
||||
|
||||
// A map value.
|
||||
message MapValue {
|
||||
// The map's fields.
|
||||
//
|
||||
// The map keys represent field names. Field names matching the regular
|
||||
// expression `__.*__` are reserved. Reserved field names are forbidden except
|
||||
// in certain documented contexts. The map keys, represented as UTF-8, must
|
||||
// not exceed 1,500 bytes and cannot be empty.
|
||||
map<string, Value> fields = 1;
|
||||
}
|
|
@ -0,0 +1,815 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: google/cloud/firestore_v1/proto/document.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
|
||||
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
|
||||
from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2
|
||||
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor.FileDescriptor(
|
||||
name="google/cloud/firestore_v1/proto/document.proto",
|
||||
package="google.firestore.v1",
|
||||
syntax="proto3",
|
||||
serialized_options=b"\n\027com.google.firestore.v1B\rDocumentProtoP\001Z<google.golang.org/genproto/googleapis/firestore/v1;firestore\242\002\004GCFS\252\002\031Google.Cloud.Firestore.V1\312\002\031Google\\Cloud\\Firestore\\V1\352\002\034Google::Cloud::Firestore::V1",
|
||||
create_key=_descriptor._internal_create_key,
|
||||
serialized_pb=b'\n.google/cloud/firestore_v1/proto/document.proto\x12\x13google.firestore.v1\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x18google/type/latlng.proto\x1a\x1cgoogle/api/annotations.proto"\x80\x02\n\x08\x44ocument\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x39\n\x06\x66ields\x18\x02 \x03(\x0b\x32).google.firestore.v1.Document.FieldsEntry\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1aI\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.google.firestore.v1.Value:\x02\x38\x01"\xae\x03\n\x05Value\x12\x30\n\nnull_value\x18\x0b \x01(\x0e\x32\x1a.google.protobuf.NullValueH\x00\x12\x17\n\rboolean_value\x18\x01 \x01(\x08H\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x03 \x01(\x01H\x00\x12\x35\n\x0ftimestamp_value\x18\n \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x16\n\x0cstring_value\x18\x11 \x01(\tH\x00\x12\x15\n\x0b\x62ytes_value\x18\x12 \x01(\x0cH\x00\x12\x19\n\x0freference_value\x18\x05 \x01(\tH\x00\x12.\n\x0fgeo_point_value\x18\x08 \x01(\x0b\x32\x13.google.type.LatLngH\x00\x12\x36\n\x0b\x61rray_value\x18\t \x01(\x0b\x32\x1f.google.firestore.v1.ArrayValueH\x00\x12\x32\n\tmap_value\x18\x06 \x01(\x0b\x32\x1d.google.firestore.v1.MapValueH\x00\x42\x0c\n\nvalue_type"8\n\nArrayValue\x12*\n\x06values\x18\x01 \x03(\x0b\x32\x1a.google.firestore.v1.Value"\x90\x01\n\x08MapValue\x12\x39\n\x06\x66ields\x18\x01 \x03(\x0b\x32).google.firestore.v1.MapValue.FieldsEntry\x1aI\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 
\x01(\x0b\x32\x1a.google.firestore.v1.Value:\x02\x38\x01\x42\xc6\x01\n\x17\x63om.google.firestore.v1B\rDocumentProtoP\x01Z<google.golang.org/genproto/googleapis/firestore/v1;firestore\xa2\x02\x04GCFS\xaa\x02\x19Google.Cloud.Firestore.V1\xca\x02\x19Google\\Cloud\\Firestore\\V1\xea\x02\x1cGoogle::Cloud::Firestore::V1b\x06proto3',
|
||||
dependencies=[
|
||||
google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,
|
||||
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
|
||||
google_dot_type_dot_latlng__pb2.DESCRIPTOR,
|
||||
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
_DOCUMENT_FIELDSENTRY = _descriptor.Descriptor(
|
||||
name="FieldsEntry",
|
||||
full_name="google.firestore.v1.Document.FieldsEntry",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="key",
|
||||
full_name="google.firestore.v1.Document.FieldsEntry.key",
|
||||
index=0,
|
||||
number=1,
|
||||
type=9,
|
||||
cpp_type=9,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=b"".decode("utf-8"),
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="value",
|
||||
full_name="google.firestore.v1.Document.FieldsEntry.value",
|
||||
index=1,
|
||||
number=2,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=b"8\001",
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[],
|
||||
serialized_start=374,
|
||||
serialized_end=447,
|
||||
)
|
||||
|
||||
_DOCUMENT = _descriptor.Descriptor(
|
||||
name="Document",
|
||||
full_name="google.firestore.v1.Document",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="name",
|
||||
full_name="google.firestore.v1.Document.name",
|
||||
index=0,
|
||||
number=1,
|
||||
type=9,
|
||||
cpp_type=9,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=b"".decode("utf-8"),
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="fields",
|
||||
full_name="google.firestore.v1.Document.fields",
|
||||
index=1,
|
||||
number=2,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=3,
|
||||
has_default_value=False,
|
||||
default_value=[],
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="create_time",
|
||||
full_name="google.firestore.v1.Document.create_time",
|
||||
index=2,
|
||||
number=3,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="update_time",
|
||||
full_name="google.firestore.v1.Document.update_time",
|
||||
index=3,
|
||||
number=4,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[_DOCUMENT_FIELDSENTRY,],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[],
|
||||
serialized_start=191,
|
||||
serialized_end=447,
|
||||
)
|
||||
|
||||
|
||||
_VALUE = _descriptor.Descriptor(
|
||||
name="Value",
|
||||
full_name="google.firestore.v1.Value",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="null_value",
|
||||
full_name="google.firestore.v1.Value.null_value",
|
||||
index=0,
|
||||
number=11,
|
||||
type=14,
|
||||
cpp_type=8,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=0,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="boolean_value",
|
||||
full_name="google.firestore.v1.Value.boolean_value",
|
||||
index=1,
|
||||
number=1,
|
||||
type=8,
|
||||
cpp_type=7,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=False,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="integer_value",
|
||||
full_name="google.firestore.v1.Value.integer_value",
|
||||
index=2,
|
||||
number=2,
|
||||
type=3,
|
||||
cpp_type=2,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=0,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="double_value",
|
||||
full_name="google.firestore.v1.Value.double_value",
|
||||
index=3,
|
||||
number=3,
|
||||
type=1,
|
||||
cpp_type=5,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=float(0),
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="timestamp_value",
|
||||
full_name="google.firestore.v1.Value.timestamp_value",
|
||||
index=4,
|
||||
number=10,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="string_value",
|
||||
full_name="google.firestore.v1.Value.string_value",
|
||||
index=5,
|
||||
number=17,
|
||||
type=9,
|
||||
cpp_type=9,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=b"".decode("utf-8"),
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="bytes_value",
|
||||
full_name="google.firestore.v1.Value.bytes_value",
|
||||
index=6,
|
||||
number=18,
|
||||
type=12,
|
||||
cpp_type=9,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=b"",
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="reference_value",
|
||||
full_name="google.firestore.v1.Value.reference_value",
|
||||
index=7,
|
||||
number=5,
|
||||
type=9,
|
||||
cpp_type=9,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=b"".decode("utf-8"),
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="geo_point_value",
|
||||
full_name="google.firestore.v1.Value.geo_point_value",
|
||||
index=8,
|
||||
number=8,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="array_value",
|
||||
full_name="google.firestore.v1.Value.array_value",
|
||||
index=9,
|
||||
number=9,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="map_value",
|
||||
full_name="google.firestore.v1.Value.map_value",
|
||||
index=10,
|
||||
number=6,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[
|
||||
_descriptor.OneofDescriptor(
|
||||
name="value_type",
|
||||
full_name="google.firestore.v1.Value.value_type",
|
||||
index=0,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[],
|
||||
),
|
||||
],
|
||||
serialized_start=450,
|
||||
serialized_end=880,
|
||||
)
|
||||
|
||||
|
||||
_ARRAYVALUE = _descriptor.Descriptor(
|
||||
name="ArrayValue",
|
||||
full_name="google.firestore.v1.ArrayValue",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="values",
|
||||
full_name="google.firestore.v1.ArrayValue.values",
|
||||
index=0,
|
||||
number=1,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=3,
|
||||
has_default_value=False,
|
||||
default_value=[],
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[],
|
||||
serialized_start=882,
|
||||
serialized_end=938,
|
||||
)
|
||||
|
||||
|
||||
_MAPVALUE_FIELDSENTRY = _descriptor.Descriptor(
|
||||
name="FieldsEntry",
|
||||
full_name="google.firestore.v1.MapValue.FieldsEntry",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="key",
|
||||
full_name="google.firestore.v1.MapValue.FieldsEntry.key",
|
||||
index=0,
|
||||
number=1,
|
||||
type=9,
|
||||
cpp_type=9,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=b"".decode("utf-8"),
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
_descriptor.FieldDescriptor(
|
||||
name="value",
|
||||
full_name="google.firestore.v1.MapValue.FieldsEntry.value",
|
||||
index=1,
|
||||
number=2,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=1,
|
||||
has_default_value=False,
|
||||
default_value=None,
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[],
|
||||
enum_types=[],
|
||||
serialized_options=b"8\001",
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[],
|
||||
serialized_start=374,
|
||||
serialized_end=447,
|
||||
)
|
||||
|
||||
_MAPVALUE = _descriptor.Descriptor(
|
||||
name="MapValue",
|
||||
full_name="google.firestore.v1.MapValue",
|
||||
filename=None,
|
||||
file=DESCRIPTOR,
|
||||
containing_type=None,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
fields=[
|
||||
_descriptor.FieldDescriptor(
|
||||
name="fields",
|
||||
full_name="google.firestore.v1.MapValue.fields",
|
||||
index=0,
|
||||
number=1,
|
||||
type=11,
|
||||
cpp_type=10,
|
||||
label=3,
|
||||
has_default_value=False,
|
||||
default_value=[],
|
||||
message_type=None,
|
||||
enum_type=None,
|
||||
containing_type=None,
|
||||
is_extension=False,
|
||||
extension_scope=None,
|
||||
serialized_options=None,
|
||||
file=DESCRIPTOR,
|
||||
create_key=_descriptor._internal_create_key,
|
||||
),
|
||||
],
|
||||
extensions=[],
|
||||
nested_types=[_MAPVALUE_FIELDSENTRY,],
|
||||
enum_types=[],
|
||||
serialized_options=None,
|
||||
is_extendable=False,
|
||||
syntax="proto3",
|
||||
extension_ranges=[],
|
||||
oneofs=[],
|
||||
serialized_start=941,
|
||||
serialized_end=1085,
|
||||
)
|
||||
|
||||
_DOCUMENT_FIELDSENTRY.fields_by_name["value"].message_type = _VALUE
|
||||
_DOCUMENT_FIELDSENTRY.containing_type = _DOCUMENT
|
||||
_DOCUMENT.fields_by_name["fields"].message_type = _DOCUMENT_FIELDSENTRY
|
||||
_DOCUMENT.fields_by_name[
|
||||
"create_time"
|
||||
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
|
||||
_DOCUMENT.fields_by_name[
|
||||
"update_time"
|
||||
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
|
||||
_VALUE.fields_by_name[
|
||||
"null_value"
|
||||
].enum_type = google_dot_protobuf_dot_struct__pb2._NULLVALUE
|
||||
_VALUE.fields_by_name[
|
||||
"timestamp_value"
|
||||
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
|
||||
_VALUE.fields_by_name[
|
||||
"geo_point_value"
|
||||
].message_type = google_dot_type_dot_latlng__pb2._LATLNG
|
||||
_VALUE.fields_by_name["array_value"].message_type = _ARRAYVALUE
|
||||
_VALUE.fields_by_name["map_value"].message_type = _MAPVALUE
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["null_value"])
|
||||
_VALUE.fields_by_name["null_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(
|
||||
_VALUE.fields_by_name["boolean_value"]
|
||||
)
|
||||
_VALUE.fields_by_name["boolean_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(
|
||||
_VALUE.fields_by_name["integer_value"]
|
||||
)
|
||||
_VALUE.fields_by_name["integer_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["double_value"])
|
||||
_VALUE.fields_by_name["double_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(
|
||||
_VALUE.fields_by_name["timestamp_value"]
|
||||
)
|
||||
_VALUE.fields_by_name["timestamp_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["string_value"])
|
||||
_VALUE.fields_by_name["string_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["bytes_value"])
|
||||
_VALUE.fields_by_name["bytes_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(
|
||||
_VALUE.fields_by_name["reference_value"]
|
||||
)
|
||||
_VALUE.fields_by_name["reference_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(
|
||||
_VALUE.fields_by_name["geo_point_value"]
|
||||
)
|
||||
_VALUE.fields_by_name["geo_point_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["array_value"])
|
||||
_VALUE.fields_by_name["array_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_VALUE.oneofs_by_name["value_type"].fields.append(_VALUE.fields_by_name["map_value"])
|
||||
_VALUE.fields_by_name["map_value"].containing_oneof = _VALUE.oneofs_by_name[
|
||||
"value_type"
|
||||
]
|
||||
_ARRAYVALUE.fields_by_name["values"].message_type = _VALUE
|
||||
_MAPVALUE_FIELDSENTRY.fields_by_name["value"].message_type = _VALUE
|
||||
_MAPVALUE_FIELDSENTRY.containing_type = _MAPVALUE
|
||||
_MAPVALUE.fields_by_name["fields"].message_type = _MAPVALUE_FIELDSENTRY
|
||||
DESCRIPTOR.message_types_by_name["Document"] = _DOCUMENT
|
||||
DESCRIPTOR.message_types_by_name["Value"] = _VALUE
|
||||
DESCRIPTOR.message_types_by_name["ArrayValue"] = _ARRAYVALUE
|
||||
DESCRIPTOR.message_types_by_name["MapValue"] = _MAPVALUE
|
||||
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
|
||||
|
||||
Document = _reflection.GeneratedProtocolMessageType(
|
||||
"Document",
|
||||
(_message.Message,),
|
||||
{
|
||||
"FieldsEntry": _reflection.GeneratedProtocolMessageType(
|
||||
"FieldsEntry",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _DOCUMENT_FIELDSENTRY,
|
||||
"__module__": "google.cloud.firestore_v1.proto.document_pb2"
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.Document.FieldsEntry)
|
||||
},
|
||||
),
|
||||
"DESCRIPTOR": _DOCUMENT,
|
||||
"__module__": "google.cloud.firestore_v1.proto.document_pb2",
|
||||
"__doc__": """A Firestore document. Must not exceed 1 MiB - 4 bytes.
|
||||
|
||||
Attributes:
|
||||
name:
|
||||
The resource name of the document, for example ``projects/{pro
|
||||
ject_id}/databases/{database_id}/documents/{document_path}``.
|
||||
fields:
|
||||
The document’s fields. The map keys represent field names. A
|
||||
simple field name contains only characters ``a`` to ``z``,
|
||||
``A`` to ``Z``, ``0`` to ``9``, or ``_``, and must not start
|
||||
with ``0`` to ``9``. For example, ``foo_bar_17``. Field names
|
||||
matching the regular expression ``__.*__`` are reserved.
|
||||
Reserved field names are forbidden except in certain
|
||||
documented contexts. The map keys, represented as UTF-8, must
|
||||
not exceed 1,500 bytes and cannot be empty. Field paths may
|
||||
be used in other contexts to refer to structured fields
|
||||
defined here. For ``map_value``, the field path is represented
|
||||
by the simple or quoted field names of the containing fields,
|
||||
delimited by ``.``. For example, the structured field ``"foo"
|
||||
: { map_value: { "x&y" : { string_value: "hello" }}}`` would
|
||||
be represented by the field path ``foo.x&y``. Within a field
|
||||
path, a quoted field name starts and ends with :literal:`\``
|
||||
and may contain any character. Some characters, including
|
||||
:literal:`\``, must be escaped using a ``\``. For example,
|
||||
:literal:`\`x&y\`` represents ``x&y`` and
|
||||
:literal:`\`bak\`tik\`` represents :literal:`bak`tik`.
|
||||
create_time:
|
||||
Output only. The time at which the document was created. This
|
||||
value increases monotonically when a document is deleted then
|
||||
recreated. It can also be compared to values from other
|
||||
documents and the ``read_time`` of a query.
|
||||
update_time:
|
||||
Output only. The time at which the document was last changed.
|
||||
This value is initially set to the ``create_time`` then
|
||||
increases monotonically with each change to the document. It
|
||||
can also be compared to values from other documents and the
|
||||
``read_time`` of a query.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.Document)
|
||||
},
|
||||
)
|
||||
_sym_db.RegisterMessage(Document)
|
||||
_sym_db.RegisterMessage(Document.FieldsEntry)
|
||||
|
||||
Value = _reflection.GeneratedProtocolMessageType(
|
||||
"Value",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _VALUE,
|
||||
"__module__": "google.cloud.firestore_v1.proto.document_pb2",
|
||||
"__doc__": """A message that can hold any of the supported value types.
|
||||
|
||||
Attributes:
|
||||
value_type:
|
||||
Must have a value set.
|
||||
null_value:
|
||||
A null value.
|
||||
boolean_value:
|
||||
A boolean value.
|
||||
integer_value:
|
||||
An integer value.
|
||||
double_value:
|
||||
A double value.
|
||||
timestamp_value:
|
||||
A timestamp value. Precise only to microseconds. When stored,
|
||||
any additional precision is rounded down.
|
||||
string_value:
|
||||
A string value. The string, represented as UTF-8, must not
|
||||
exceed 1 MiB - 89 bytes. Only the first 1,500 bytes of the
|
||||
UTF-8 representation are considered by queries.
|
||||
bytes_value:
|
||||
A bytes value. Must not exceed 1 MiB - 89 bytes. Only the
|
||||
first 1,500 bytes are considered by queries.
|
||||
reference_value:
|
||||
A reference to a document. For example: ``projects/{project_id
|
||||
}/databases/{database_id}/documents/{document_path}``.
|
||||
geo_point_value:
|
||||
A geo point value representing a point on the surface of
|
||||
Earth.
|
||||
array_value:
|
||||
An array value. Cannot directly contain another array value,
|
||||
though can contain an map which contains another array.
|
||||
map_value:
|
||||
A map value.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.Value)
|
||||
},
|
||||
)
|
||||
_sym_db.RegisterMessage(Value)
|
||||
|
||||
ArrayValue = _reflection.GeneratedProtocolMessageType(
|
||||
"ArrayValue",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _ARRAYVALUE,
|
||||
"__module__": "google.cloud.firestore_v1.proto.document_pb2",
|
||||
"__doc__": """An array value.
|
||||
|
||||
Attributes:
|
||||
values:
|
||||
Values in the array.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.ArrayValue)
|
||||
},
|
||||
)
|
||||
_sym_db.RegisterMessage(ArrayValue)
|
||||
|
||||
MapValue = _reflection.GeneratedProtocolMessageType(
|
||||
"MapValue",
|
||||
(_message.Message,),
|
||||
{
|
||||
"FieldsEntry": _reflection.GeneratedProtocolMessageType(
|
||||
"FieldsEntry",
|
||||
(_message.Message,),
|
||||
{
|
||||
"DESCRIPTOR": _MAPVALUE_FIELDSENTRY,
|
||||
"__module__": "google.cloud.firestore_v1.proto.document_pb2"
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.MapValue.FieldsEntry)
|
||||
},
|
||||
),
|
||||
"DESCRIPTOR": _MAPVALUE,
|
||||
"__module__": "google.cloud.firestore_v1.proto.document_pb2",
|
||||
"__doc__": """A map value.
|
||||
|
||||
Attributes:
|
||||
fields:
|
||||
The map’s fields. The map keys represent field names. Field
|
||||
names matching the regular expression ``__.*__`` are reserved.
|
||||
Reserved field names are forbidden except in certain
|
||||
documented contexts. The map keys, represented as UTF-8, must
|
||||
not exceed 1,500 bytes and cannot be empty.
|
||||
""",
|
||||
# @@protoc_insertion_point(class_scope:google.firestore.v1.MapValue)
|
||||
},
|
||||
)
|
||||
_sym_db.RegisterMessage(MapValue)
|
||||
_sym_db.RegisterMessage(MapValue.FieldsEntry)
|
||||
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
_DOCUMENT_FIELDSENTRY._options = None
|
||||
_MAPVALUE_FIELDSENTRY._options = None
|
||||
# @@protoc_insertion_point(module_scope)
|
|
@ -0,0 +1,3 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
|
@ -0,0 +1,896 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
import "google/api/annotations.proto";
|
||||
import "google/api/client.proto";
|
||||
import "google/api/field_behavior.proto";
|
||||
import "google/firestore/v1/common.proto";
|
||||
import "google/firestore/v1/document.proto";
|
||||
import "google/firestore/v1/query.proto";
|
||||
import "google/firestore/v1/write.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/rpc/status.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "FirestoreProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// Specification of the Firestore API.
|
||||
|
||||
// The Cloud Firestore service.
|
||||
//
|
||||
// Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL
|
||||
// document database that simplifies storing, syncing, and querying data for
|
||||
// your mobile, web, and IoT apps at global scale. Its client libraries provide
|
||||
// live synchronization and offline support, while its security features and
|
||||
// integrations with Firebase and Google Cloud Platform (GCP) accelerate
|
||||
// building truly serverless apps.
|
||||
service Firestore {
|
||||
option (google.api.default_host) = "firestore.googleapis.com";
|
||||
option (google.api.oauth_scopes) =
|
||||
"https://www.googleapis.com/auth/cloud-platform,"
|
||||
"https://www.googleapis.com/auth/datastore";
|
||||
|
||||
// Gets a single document.
|
||||
rpc GetDocument(GetDocumentRequest) returns (Document) {
|
||||
option (google.api.http) = {
|
||||
get: "/v1/{name=projects/*/databases/*/documents/*/**}"
|
||||
};
|
||||
}
|
||||
|
||||
// Lists documents.
|
||||
rpc ListDocuments(ListDocumentsRequest) returns (ListDocumentsResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
|
||||
};
|
||||
}
|
||||
|
||||
// Updates or inserts a document.
|
||||
rpc UpdateDocument(UpdateDocumentRequest) returns (Document) {
|
||||
option (google.api.http) = {
|
||||
patch: "/v1/{document.name=projects/*/databases/*/documents/*/**}"
|
||||
body: "document"
|
||||
};
|
||||
option (google.api.method_signature) = "document,update_mask";
|
||||
}
|
||||
|
||||
// Deletes a document.
|
||||
rpc DeleteDocument(DeleteDocumentRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
delete: "/v1/{name=projects/*/databases/*/documents/*/**}"
|
||||
};
|
||||
option (google.api.method_signature) = "name";
|
||||
}
|
||||
|
||||
// Gets multiple documents.
|
||||
//
|
||||
// Documents returned by this method are not guaranteed to be returned in the
|
||||
// same order that they were requested.
|
||||
rpc BatchGetDocuments(BatchGetDocumentsRequest) returns (stream BatchGetDocumentsResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{database=projects/*/databases/*}/documents:batchGet"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Starts a new transaction.
|
||||
rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{database=projects/*/databases/*}/documents:beginTransaction"
|
||||
body: "*"
|
||||
};
|
||||
option (google.api.method_signature) = "database";
|
||||
}
|
||||
|
||||
// Commits a transaction, while optionally updating documents.
|
||||
rpc Commit(CommitRequest) returns (CommitResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{database=projects/*/databases/*}/documents:commit"
|
||||
body: "*"
|
||||
};
|
||||
option (google.api.method_signature) = "database,writes";
|
||||
}
|
||||
|
||||
// Rolls back a transaction.
|
||||
rpc Rollback(RollbackRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{database=projects/*/databases/*}/documents:rollback"
|
||||
body: "*"
|
||||
};
|
||||
option (google.api.method_signature) = "database,transaction";
|
||||
}
|
||||
|
||||
// Runs a query.
|
||||
rpc RunQuery(RunQueryRequest) returns (stream RunQueryResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{parent=projects/*/databases/*/documents}:runQuery"
|
||||
body: "*"
|
||||
additional_bindings {
|
||||
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery"
|
||||
body: "*"
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Partitions a query by returning partition cursors that can be used to run
|
||||
// the query in parallel. The returned partition cursors are split points that
|
||||
// can be used by RunQuery as starting/end points for the query results.
|
||||
rpc PartitionQuery(PartitionQueryRequest) returns (PartitionQueryResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{parent=projects/*/databases/*/documents}:partitionQuery"
|
||||
body: "*"
|
||||
additional_bindings {
|
||||
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery"
|
||||
body: "*"
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Streams batches of document updates and deletes, in order.
|
||||
rpc Write(stream WriteRequest) returns (stream WriteResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{database=projects/*/databases/*}/documents:write"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Listens to changes.
|
||||
rpc Listen(stream ListenRequest) returns (stream ListenResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{database=projects/*/databases/*}/documents:listen"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Lists all the collection IDs underneath a document.
|
||||
rpc ListCollectionIds(ListCollectionIdsRequest) returns (ListCollectionIdsResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds"
|
||||
body: "*"
|
||||
additional_bindings {
|
||||
post: "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds"
|
||||
body: "*"
|
||||
}
|
||||
};
|
||||
option (google.api.method_signature) = "parent";
|
||||
}
|
||||
|
||||
// Applies a batch of write operations.
|
||||
//
|
||||
// The BatchWrite method does not apply the write operations atomically
|
||||
// and can apply them out of order. Method does not allow more than one write
|
||||
// per document. Each write succeeds or fails independently. See the
|
||||
// [BatchWriteResponse][google.firestore.v1.BatchWriteResponse] for the success status of each write.
|
||||
//
|
||||
// If you require an atomically applied set of writes, use
|
||||
// [Commit][google.firestore.v1.Firestore.Commit] instead.
|
||||
rpc BatchWrite(BatchWriteRequest) returns (BatchWriteResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{database=projects/*/databases/*}/documents:batchWrite"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Creates a new document.
|
||||
rpc CreateDocument(CreateDocumentRequest) returns (Document) {
|
||||
option (google.api.http) = {
|
||||
post: "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}"
|
||||
body: "document"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// The request for [Firestore.GetDocument][google.firestore.v1.Firestore.GetDocument].
|
||||
message GetDocumentRequest {
|
||||
// Required. The resource name of the Document to get. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string name = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If the document has a field that is not present in this mask, that field
|
||||
// will not be returned in the response.
|
||||
DocumentMask mask = 2;
|
||||
|
||||
// The consistency mode for this transaction.
|
||||
// If not set, defaults to strong consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads the document in a transaction.
|
||||
bytes transaction = 3;
|
||||
|
||||
// Reads the version of the document at the given time.
|
||||
// This may not be older than 270 seconds.
|
||||
google.protobuf.Timestamp read_time = 5;
|
||||
}
|
||||
}
|
||||
|
||||
// The request for [Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments].
|
||||
message ListDocumentsRequest {
|
||||
// Required. The parent resource name. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents` or
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
// For example:
|
||||
// `projects/my-project/databases/my-database/documents` or
|
||||
// `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
|
||||
string parent = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`
|
||||
// or `messages`.
|
||||
string collection_id = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The maximum number of documents to return.
|
||||
int32 page_size = 3;
|
||||
|
||||
// The `next_page_token` value returned from a previous List request, if any.
|
||||
string page_token = 4;
|
||||
|
||||
// The order to sort results by. For example: `priority desc, name`.
|
||||
string order_by = 6;
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If a document has a field that is not present in this mask, that field
|
||||
// will not be returned in the response.
|
||||
DocumentMask mask = 7;
|
||||
|
||||
// The consistency mode for this transaction.
|
||||
// If not set, defaults to strong consistency.
|
||||
oneof consistency_selector {
|
||||
// Reads documents in a transaction.
|
||||
bytes transaction = 8;
|
||||
|
||||
// Reads documents as they were at the given time.
|
||||
// This may not be older than 270 seconds.
|
||||
google.protobuf.Timestamp read_time = 10;
|
||||
}
|
||||
|
||||
// If the list should show missing documents. A missing document is a
|
||||
// document that does not exist but has sub-documents. These documents will
|
||||
// be returned with a key but will not have fields, [Document.create_time][google.firestore.v1.Document.create_time],
|
||||
// or [Document.update_time][google.firestore.v1.Document.update_time] set.
|
||||
//
|
||||
// Requests with `show_missing` may not specify `where` or
|
||||
// `order_by`.
|
||||
bool show_missing = 12;
|
||||
}
|
||||
|
||||
// The response for [Firestore.ListDocuments][google.firestore.v1.Firestore.ListDocuments].
|
||||
message ListDocumentsResponse {
|
||||
// The Documents found.
|
||||
repeated Document documents = 1;
|
||||
|
||||
// The next page token.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// The request for [Firestore.CreateDocument][google.firestore.v1.Firestore.CreateDocument].
|
||||
message CreateDocumentRequest {
|
||||
// Required. The parent resource. For example:
|
||||
// `projects/{project_id}/databases/{database_id}/documents` or
|
||||
// `projects/{project_id}/databases/{database_id}/documents/chatrooms/{chatroom_id}`
|
||||
string parent = 1 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// Required. The collection ID, relative to `parent`, to list. For example: `chatrooms`.
|
||||
string collection_id = 2 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The client-assigned document ID to use for this document.
|
||||
//
|
||||
// Optional. If not specified, an ID will be assigned by the service.
|
||||
string document_id = 3;
|
||||
|
||||
// Required. The document to create. `name` must not be set.
|
||||
Document document = 4 [(google.api.field_behavior) = REQUIRED];
|
||||
|
||||
// The fields to return. If not set, returns all fields.
|
||||
//
|
||||
// If the document has a field that is not present in this mask, that field
|
||||
// will not be returned in the response.
|
||||
DocumentMask mask = 5;
|
||||
}
|
||||
|
||||
// The request for [Firestore.UpdateDocument][google.firestore.v1.Firestore.UpdateDocument].
message UpdateDocumentRequest {
  // Required. The updated document.
  // Creates the document if it does not already exist.
  Document document = 1 [(google.api.field_behavior) = REQUIRED];

  // The fields to update.
  // None of the field paths in the mask may contain a reserved name.
  //
  // If the document exists on the server and has fields not referenced in the
  // mask, they are left unchanged.
  // Fields referenced in the mask, but not present in the input document, are
  // deleted from the document on the server.
  DocumentMask update_mask = 2;

  // The fields to return. If not set, returns all fields.
  //
  // If the document has a field that is not present in this mask, that field
  // will not be returned in the response.
  DocumentMask mask = 3;

  // An optional precondition on the document.
  // The request will fail if this is set and not met by the target document.
  Precondition current_document = 4;
}
|
||||
|
||||
// The request for [Firestore.DeleteDocument][google.firestore.v1.Firestore.DeleteDocument].
message DeleteDocumentRequest {
  // Required. The resource name of the Document to delete. In the format:
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // An optional precondition on the document.
  // The request will fail if this is set and not met by the target document.
  Precondition current_document = 2;
}
|
||||
|
||||
// The request for [Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments].
message BatchGetDocumentsRequest {
  // Required. The database name. In the format:
  // `projects/{project_id}/databases/{database_id}`.
  string database = 1 [(google.api.field_behavior) = REQUIRED];

  // The names of the documents to retrieve. In the format:
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  // The request will fail if any of the documents is not a child resource of
  // the given `database`. Duplicate names will be elided.
  repeated string documents = 2;

  // The fields to return. If not set, returns all fields.
  //
  // If a document has a field that is not present in this mask, that field will
  // not be returned in the response.
  DocumentMask mask = 3;

  // The consistency mode for this transaction.
  // If not set, defaults to strong consistency.
  oneof consistency_selector {
    // Reads documents in a transaction.
    bytes transaction = 4;

    // Starts a new transaction and reads the documents.
    // Defaults to a read-only transaction.
    // The new transaction ID will be returned as the first response in the
    // stream.
    TransactionOptions new_transaction = 5;

    // Reads documents as they were at the given time.
    // This may not be older than 270 seconds.
    google.protobuf.Timestamp read_time = 7;
  }
}
|
||||
|
||||
// The streamed response for [Firestore.BatchGetDocuments][google.firestore.v1.Firestore.BatchGetDocuments].
message BatchGetDocumentsResponse {
  // A single result.
  // This can be empty if the server is just returning a transaction.
  oneof result {
    // A document that was requested.
    Document found = 1;

    // A document name that was requested but does not exist. In the format:
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    string missing = 2;
  }

  // The transaction that was started as part of this request.
  // Will only be set in the first response, and only if
  // [BatchGetDocumentsRequest.new_transaction][google.firestore.v1.BatchGetDocumentsRequest.new_transaction] was set in the request.
  bytes transaction = 3;

  // The time at which the document was read.
  // This may be monotonically increasing; in this case, the previous documents
  // in the result stream are guaranteed not to have changed between their
  // read_time and this one.
  google.protobuf.Timestamp read_time = 4;
}
|
||||
|
||||
// The request for [Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction].
message BeginTransactionRequest {
  // Required. The database name. In the format:
  // `projects/{project_id}/databases/{database_id}`.
  string database = 1 [(google.api.field_behavior) = REQUIRED];

  // The options for the transaction.
  // Defaults to a read-write transaction.
  TransactionOptions options = 2;
}
|
||||
|
||||
// The response for [Firestore.BeginTransaction][google.firestore.v1.Firestore.BeginTransaction].
message BeginTransactionResponse {
  // The transaction that was started. Opaque token to pass back in subsequent
  // reads/commits that should run inside this transaction.
  bytes transaction = 1;
}
|
||||
|
||||
// The request for [Firestore.Commit][google.firestore.v1.Firestore.Commit].
message CommitRequest {
  // Required. The database name. In the format:
  // `projects/{project_id}/databases/{database_id}`.
  string database = 1 [(google.api.field_behavior) = REQUIRED];

  // The writes to apply.
  //
  // Always executed atomically and in order.
  repeated Write writes = 2;

  // If set, applies all writes in this transaction, and commits it.
  bytes transaction = 3;
}
|
||||
|
||||
// The response for [Firestore.Commit][google.firestore.v1.Firestore.Commit].
message CommitResponse {
  // The result of applying the writes.
  //
  // The i-th write result corresponds to the i-th write in the
  // request.
  repeated WriteResult write_results = 1;

  // The time at which the commit occurred. Any read with an equal or greater
  // `read_time` is guaranteed to see the effects of the commit.
  google.protobuf.Timestamp commit_time = 2;
}
|
||||
|
||||
// The request for [Firestore.Rollback][google.firestore.v1.Firestore.Rollback].
message RollbackRequest {
  // Required. The database name. In the format:
  // `projects/{project_id}/databases/{database_id}`.
  string database = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The transaction to roll back.
  bytes transaction = 2 [(google.api.field_behavior) = REQUIRED];
}
|
||||
|
||||
// The request for [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery].
message RunQueryRequest {
  // Required. The parent resource name. In the format:
  // `projects/{project_id}/databases/{database_id}/documents` or
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  // For example:
  // `projects/my-project/databases/my-database/documents` or
  // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
  string parent = 1 [(google.api.field_behavior) = REQUIRED];

  // The query to run.
  oneof query_type {
    // A structured query.
    StructuredQuery structured_query = 2;
  }

  // The consistency mode for this transaction.
  // If not set, defaults to strong consistency.
  oneof consistency_selector {
    // Reads documents in a transaction.
    bytes transaction = 5;

    // Starts a new transaction and reads the documents.
    // Defaults to a read-only transaction.
    // The new transaction ID will be returned as the first response in the
    // stream.
    TransactionOptions new_transaction = 6;

    // Reads documents as they were at the given time.
    // This may not be older than 270 seconds.
    google.protobuf.Timestamp read_time = 7;
  }
}
|
||||
|
||||
// The response for [Firestore.RunQuery][google.firestore.v1.Firestore.RunQuery].
message RunQueryResponse {
  // The transaction that was started as part of this request.
  // Can only be set in the first response, and only if
  // [RunQueryRequest.new_transaction][google.firestore.v1.RunQueryRequest.new_transaction] was set in the request.
  // If set, no other fields will be set in this response.
  bytes transaction = 2;

  // A query result.
  // Not set when reporting partial progress.
  Document document = 1;

  // The time at which the document was read. This may be monotonically
  // increasing; in this case, the previous documents in the result stream are
  // guaranteed not to have changed between their `read_time` and this one.
  //
  // If the query returns no results, a response with `read_time` and no
  // `document` will be sent, and this represents the time at which the query
  // was run.
  google.protobuf.Timestamp read_time = 3;

  // The number of results that have been skipped due to an offset between
  // the last response and the current response.
  int32 skipped_results = 4;
}
|
||||
|
||||
// The request for [Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery].
message PartitionQueryRequest {
  // Required. The parent resource name. In the format:
  // `projects/{project_id}/databases/{database_id}/documents`.
  // Document resource names are not supported; only database resource names
  // can be specified.
  string parent = 1 [(google.api.field_behavior) = REQUIRED];

  // The query to partition.
  oneof query_type {
    // A structured query.
    // Filters, order bys, limits, offsets, and start/end cursors are not
    // supported.
    StructuredQuery structured_query = 2;
  }

  // The desired maximum number of partition points.
  // The partitions may be returned across multiple pages of results.
  // The number must be strictly positive. The actual number of partitions
  // returned may be fewer.
  //
  // For example, this may be set to one fewer than the number of parallel
  // queries to be run, or in running a data pipeline job, one fewer than the
  // number of workers or compute instances available.
  int64 partition_count = 3;

  // The `next_page_token` value returned from a previous call to
  // PartitionQuery that may be used to get an additional set of results.
  // There are no ordering guarantees between sets of results. Thus, using
  // multiple sets of results will require merging the different result sets.
  //
  // For example, two subsequent calls using a page_token may return:
  //
  //  * cursor B, cursor M, cursor Q
  //  * cursor A, cursor U, cursor W
  //
  // To obtain a complete result set ordered with respect to the results of the
  // query supplied to PartitionQuery, the results sets should be merged:
  // cursor A, cursor B, cursor M, cursor Q, cursor U, cursor W
  string page_token = 4;

  // The maximum number of partitions to return in this call, subject to
  // `partition_count`.
  //
  // For example, if `partition_count` = 10 and `page_size` = 8, the first call
  // to PartitionQuery will return up to 8 partitions and a `next_page_token`
  // if more results exist. A second call to PartitionQuery will return up to
  // 2 partitions, to complete the total of 10 specified in `partition_count`.
  int32 page_size = 5;
}
|
||||
|
||||
// The response for [Firestore.PartitionQuery][google.firestore.v1.Firestore.PartitionQuery].
message PartitionQueryResponse {
  // Partition results.
  // Each partition is a split point that can be used by RunQuery as a starting
  // or end point for the query results. The RunQuery requests must be made with
  // the same query supplied to this PartitionQuery request. The partition
  // cursors will be ordered according to same ordering as the results of the
  // query supplied to PartitionQuery.
  //
  // For example, if a PartitionQuery request returns partition cursors A and B,
  // running the following three queries will return the entire result set of
  // the original query:
  //
  //  * query, end_at A
  //  * query, start_at A, end_at B
  //  * query, start_at B
  repeated Cursor partitions = 1;

  // A page token that may be used to request an additional set of results, up
  // to the number specified by `partition_count` in the PartitionQuery request.
  // If blank, there are no more results.
  string next_page_token = 2;
}
|
||||
|
||||
// The request for [Firestore.Write][google.firestore.v1.Firestore.Write].
//
// The first request creates a stream, or resumes an existing one from a token.
//
// When creating a new stream, the server replies with a response containing
// only an ID and a token, to use in the next request.
//
// When resuming a stream, the server first streams any responses later than the
// given token, then a response containing only an up-to-date token, to use in
// the next request.
message WriteRequest {
  // Required. The database name. In the format:
  // `projects/{project_id}/databases/{database_id}`.
  // This is only required in the first message.
  string database = 1 [(google.api.field_behavior) = REQUIRED];

  // The ID of the write stream to resume.
  // This may only be set in the first message. When left empty, a new write
  // stream will be created.
  string stream_id = 2;

  // The writes to apply.
  //
  // Always executed atomically and in order.
  // This must be empty on the first request.
  // This may be empty on the last request.
  // This must not be empty on all other requests.
  repeated Write writes = 3;

  // A stream token that was previously sent by the server.
  //
  // The client should set this field to the token from the most recent
  // [WriteResponse][google.firestore.v1.WriteResponse] it has received. This acknowledges that the client has
  // received responses up to this token. After sending this token, earlier
  // tokens may not be used anymore.
  //
  // The server may close the stream if there are too many unacknowledged
  // responses.
  //
  // Leave this field unset when creating a new stream. To resume a stream at
  // a specific point, set this field and the `stream_id` field.
  bytes stream_token = 4;

  // Labels associated with this write request.
  map<string, string> labels = 5;
}
|
||||
|
||||
// The response for [Firestore.Write][google.firestore.v1.Firestore.Write].
message WriteResponse {
  // The ID of the stream.
  // Only set on the first message, when a new stream was created.
  string stream_id = 1;

  // A token that represents the position of this response in the stream.
  // This can be used by a client to resume the stream at this point.
  //
  // This field is always set.
  bytes stream_token = 2;

  // The result of applying the writes.
  //
  // The i-th write result corresponds to the i-th write in the
  // request.
  repeated WriteResult write_results = 3;

  // The time at which the commit occurred. Any read with an equal or greater
  // `read_time` is guaranteed to see the effects of the write.
  google.protobuf.Timestamp commit_time = 4;
}
|
||||
|
||||
// A request for [Firestore.Listen][google.firestore.v1.Firestore.Listen].
message ListenRequest {
  // Required. The database name. In the format:
  // `projects/{project_id}/databases/{database_id}`.
  string database = 1 [(google.api.field_behavior) = REQUIRED];

  // The supported target changes.
  oneof target_change {
    // A target to add to this stream.
    Target add_target = 2;

    // The ID of a target to remove from this stream.
    int32 remove_target = 3;
  }

  // Labels associated with this target change.
  map<string, string> labels = 4;
}
|
||||
|
||||
// The response for [Firestore.Listen][google.firestore.v1.Firestore.Listen].
message ListenResponse {
  // The supported responses.
  oneof response_type {
    // Targets have changed.
    TargetChange target_change = 2;

    // A [Document][google.firestore.v1.Document] has changed.
    DocumentChange document_change = 3;

    // A [Document][google.firestore.v1.Document] has been deleted.
    DocumentDelete document_delete = 4;

    // A [Document][google.firestore.v1.Document] has been removed from a target (because it is no longer
    // relevant to that target).
    DocumentRemove document_remove = 6;

    // A filter to apply to the set of documents previously returned for the
    // given target.
    //
    // Returned when documents may have been removed from the given target, but
    // the exact documents are unknown.
    ExistenceFilter filter = 5;
  }
}
|
||||
|
||||
// A specification of a set of documents to listen to.
message Target {
  // A target specified by a set of document names.
  message DocumentsTarget {
    // The names of the documents to retrieve. In the format:
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    // The request will fail if any of the documents is not a child resource of
    // the given `database`. Duplicate names will be elided.
    repeated string documents = 2;
  }

  // A target specified by a query.
  message QueryTarget {
    // The parent resource name. In the format:
    // `projects/{project_id}/databases/{database_id}/documents` or
    // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
    // For example:
    // `projects/my-project/databases/my-database/documents` or
    // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
    string parent = 1;

    // The query to run.
    oneof query_type {
      // A structured query.
      StructuredQuery structured_query = 2;
    }
  }

  // The type of target to listen to.
  oneof target_type {
    // A target specified by a query.
    QueryTarget query = 2;

    // A target specified by a set of document names.
    DocumentsTarget documents = 3;
  }

  // When to start listening.
  //
  // If not specified, all matching Documents are returned before any
  // subsequent changes.
  oneof resume_type {
    // A resume token from a prior [TargetChange][google.firestore.v1.TargetChange] for an identical target.
    //
    // Using a resume token with a different target is unsupported and may fail.
    bytes resume_token = 4;

    // Start listening after a specific `read_time`.
    //
    // The client must know the state of matching documents at this time.
    google.protobuf.Timestamp read_time = 11;
  }

  // The target ID that identifies the target on the stream. Must be a positive
  // number and non-zero.
  int32 target_id = 5;

  // If the target should be removed once it is current and consistent.
  bool once = 6;
}
|
||||
|
||||
// Targets being watched have changed.
message TargetChange {
  // The type of change.
  enum TargetChangeType {
    // No change has occurred. Used only to send an updated `resume_token`.
    NO_CHANGE = 0;

    // The targets have been added.
    ADD = 1;

    // The targets have been removed.
    REMOVE = 2;

    // The targets reflect all changes committed before the targets were added
    // to the stream.
    //
    // This will be sent after or with a `read_time` that is greater than or
    // equal to the time at which the targets were added.
    //
    // Listeners can wait for this change if read-after-write semantics
    // are desired.
    CURRENT = 3;

    // The targets have been reset, and a new initial state for the targets
    // will be returned in subsequent changes.
    //
    // After the initial state is complete, `CURRENT` will be returned even
    // if the target was previously indicated to be `CURRENT`.
    RESET = 4;
  }

  // The type of change that occurred.
  TargetChangeType target_change_type = 1;

  // The target IDs of targets that have changed.
  //
  // If empty, the change applies to all targets.
  //
  // The order of the target IDs is not defined.
  repeated int32 target_ids = 2;

  // The error that resulted in this change, if applicable.
  google.rpc.Status cause = 3;

  // A token that can be used to resume the stream for the given `target_ids`,
  // or all targets if `target_ids` is empty.
  //
  // Not set on every target change.
  bytes resume_token = 4;

  // The consistent `read_time` for the given `target_ids` (omitted when the
  // target_ids are not at a consistent snapshot).
  //
  // The stream is guaranteed to send a `read_time` with `target_ids` empty
  // whenever the entire stream reaches a new consistent snapshot. ADD,
  // CURRENT, and RESET messages are guaranteed to (eventually) result in a
  // new consistent snapshot (while NO_CHANGE and REMOVE messages are not).
  //
  // For a given stream, `read_time` is guaranteed to be monotonically
  // increasing.
  google.protobuf.Timestamp read_time = 6;
}
|
||||
|
||||
// The request for [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds].
message ListCollectionIdsRequest {
  // Required. The parent document. In the format:
  // `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
  // For example:
  // `projects/my-project/databases/my-database/documents/chatrooms/my-chatroom`
  string parent = 1 [(google.api.field_behavior) = REQUIRED];

  // The maximum number of results to return.
  int32 page_size = 2;

  // A page token. Must be a value from
  // [ListCollectionIdsResponse][google.firestore.v1.ListCollectionIdsResponse].
  string page_token = 3;
}
|
||||
|
||||
// The response from [Firestore.ListCollectionIds][google.firestore.v1.Firestore.ListCollectionIds].
message ListCollectionIdsResponse {
  // The collection ids.
  repeated string collection_ids = 1;

  // A page token that may be used to continue the list.
  string next_page_token = 2;
}
|
||||
|
||||
// The request for [Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite].
message BatchWriteRequest {
  // Required. The database name. In the format:
  // `projects/{project_id}/databases/{database_id}`.
  string database = 1 [(google.api.field_behavior) = REQUIRED];

  // The writes to apply.
  //
  // Method does not apply writes atomically and does not guarantee ordering.
  // Each write succeeds or fails independently. You cannot write to the same
  // document more than once per request.
  repeated Write writes = 2;

  // Labels associated with this batch write.
  map<string, string> labels = 3;
}
|
||||
|
||||
// The response from [Firestore.BatchWrite][google.firestore.v1.Firestore.BatchWrite].
message BatchWriteResponse {
  // The result of applying the writes.
  //
  // The i-th write result corresponds to the i-th write in the
  // request.
  repeated WriteResult write_results = 1;

  // The status of applying the writes.
  //
  // The i-th write status corresponds to the i-th write in the
  // request.
  repeated google.rpc.Status status = 2;
}
|
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,743 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
||||
|
||||
from google.cloud.firestore_v1.proto import (
|
||||
document_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2,
|
||||
)
|
||||
from google.cloud.firestore_v1.proto import (
|
||||
firestore_pb2 as google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2,
|
||||
)
|
||||
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
|
||||
|
||||
|
||||
class FirestoreStub(object):
    """Specification of the Firestore API.

    The Cloud Firestore service.

    Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL
    document database that simplifies storing, syncing, and querying data for
    your mobile, web, and IoT apps at global scale. Its client libraries provide
    live synchronization and offline support, while its security features and
    integrations with Firebase and Google Cloud Platform (GCP) accelerate
    building truly serverless apps.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Each attribute below is a callable RPC handle bound to one method of
        # the google.firestore.v1.Firestore service:
        #   - channel.unary_unary:   single request -> single response
        #   - channel.unary_stream:  single request -> iterator of responses
        #   - channel.stream_stream: request iterator -> response iterator
        # NOTE: this file is generated by the gRPC Python protocol compiler
        # plugin; edits here will be lost on regeneration.
        self.GetDocument = channel.unary_unary(
            "/google.firestore.v1.Firestore/GetDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.GetDocumentRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.FromString,
        )
        self.ListDocuments = channel.unary_unary(
            "/google.firestore.v1.Firestore/ListDocuments",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.FromString,
        )
        self.UpdateDocument = channel.unary_unary(
            "/google.firestore.v1.Firestore/UpdateDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.FromString,
        )
        # Delete and Rollback return google.protobuf.Empty rather than a
        # Firestore message.
        self.DeleteDocument = channel.unary_unary(
            "/google.firestore.v1.Firestore/DeleteDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.BatchGetDocuments = channel.unary_stream(
            "/google.firestore.v1.Firestore/BatchGetDocuments",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.FromString,
        )
        self.BeginTransaction = channel.unary_unary(
            "/google.firestore.v1.Firestore/BeginTransaction",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.FromString,
        )
        self.Commit = channel.unary_unary(
            "/google.firestore.v1.Firestore/Commit",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CommitRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CommitResponse.FromString,
        )
        self.Rollback = channel.unary_unary(
            "/google.firestore.v1.Firestore/Rollback",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RollbackRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.RunQuery = channel.unary_stream(
            "/google.firestore.v1.Firestore/RunQuery",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RunQueryRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RunQueryResponse.FromString,
        )
        self.PartitionQuery = channel.unary_unary(
            "/google.firestore.v1.Firestore/PartitionQuery",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.PartitionQueryRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.PartitionQueryResponse.FromString,
        )
        # Write and Listen are bidirectional streaming RPCs.
        self.Write = channel.stream_stream(
            "/google.firestore.v1.Firestore/Write",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.WriteRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.WriteResponse.FromString,
        )
        self.Listen = channel.stream_stream(
            "/google.firestore.v1.Firestore/Listen",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListenRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListenResponse.FromString,
        )
        self.ListCollectionIds = channel.unary_unary(
            "/google.firestore.v1.Firestore/ListCollectionIds",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.FromString,
        )
        self.BatchWrite = channel.unary_unary(
            "/google.firestore.v1.Firestore/BatchWrite",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchWriteRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchWriteResponse.FromString,
        )
        self.CreateDocument = channel.unary_unary(
            "/google.firestore.v1.Firestore/CreateDocument",
            request_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.FromString,
        )
|
||||
|
||||
|
||||
class FirestoreServicer(object):
|
||||
"""Specification of the Firestore API.
|
||||
|
||||
The Cloud Firestore service.
|
||||
|
||||
Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL
|
||||
document database that simplifies storing, syncing, and querying data for
|
||||
your mobile, web, and IoT apps at global scale. Its client libraries provide
|
||||
live synchronization and offline support, while its security features and
|
||||
integrations with Firebase and Google Cloud Platform (GCP) accelerate
|
||||
building truly serverless apps.
|
||||
"""
|
||||
|
||||
    def GetDocument(self, request, context):
        """Gets a single document.
        """
        # Generated default: signal UNIMPLEMENTED to the client and raise
        # locally; concrete servicer subclasses override this method.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def ListDocuments(self, request, context):
|
||||
"""Lists documents.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def UpdateDocument(self, request, context):
|
||||
"""Updates or inserts a document.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def DeleteDocument(self, request, context):
|
||||
"""Deletes a document.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def BatchGetDocuments(self, request, context):
|
||||
"""Gets multiple documents.
|
||||
|
||||
Documents returned by this method are not guaranteed to be returned in the
|
||||
same order that they were requested.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def BeginTransaction(self, request, context):
|
||||
"""Starts a new transaction.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def Commit(self, request, context):
|
||||
"""Commits a transaction, while optionally updating documents.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def Rollback(self, request, context):
|
||||
"""Rolls back a transaction.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def RunQuery(self, request, context):
|
||||
"""Runs a query.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def PartitionQuery(self, request, context):
|
||||
"""Partitions a query by returning partition cursors that can be used to run
|
||||
the query in parallel. The returned partition cursors are split points that
|
||||
can be used by RunQuery as starting/end points for the query results.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def Write(self, request_iterator, context):
|
||||
"""Streams batches of document updates and deletes, in order.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def Listen(self, request_iterator, context):
|
||||
"""Listens to changes.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def ListCollectionIds(self, request, context):
|
||||
"""Lists all the collection IDs underneath a document.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def BatchWrite(self, request, context):
|
||||
"""Applies a batch of write operations.
|
||||
|
||||
The BatchWrite method does not apply the write operations atomically
|
||||
and can apply them out of order. Method does not allow more than one write
|
||||
per document. Each write succeeds or fails independently. See the
|
||||
[BatchWriteResponse][google.firestore.v1.BatchWriteResponse] for the success status of each write.
|
||||
|
||||
If you require an atomically applied set of writes, use
|
||||
[Commit][google.firestore.v1.Firestore.Commit] instead.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
def CreateDocument(self, request, context):
|
||||
"""Creates a new document.
|
||||
"""
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Method not implemented!")
|
||||
raise NotImplementedError("Method not implemented!")
|
||||
|
||||
|
||||
def add_FirestoreServicer_to_server(servicer, server):
|
||||
rpc_method_handlers = {
|
||||
"GetDocument": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.GetDocument,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.GetDocumentRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.SerializeToString,
|
||||
),
|
||||
"ListDocuments": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.ListDocuments,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.SerializeToString,
|
||||
),
|
||||
"UpdateDocument": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.UpdateDocument,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.SerializeToString,
|
||||
),
|
||||
"DeleteDocument": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.DeleteDocument,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.FromString,
|
||||
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
|
||||
),
|
||||
"BatchGetDocuments": grpc.unary_stream_rpc_method_handler(
|
||||
servicer.BatchGetDocuments,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.SerializeToString,
|
||||
),
|
||||
"BeginTransaction": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.BeginTransaction,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.SerializeToString,
|
||||
),
|
||||
"Commit": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Commit,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CommitRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CommitResponse.SerializeToString,
|
||||
),
|
||||
"Rollback": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.Rollback,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RollbackRequest.FromString,
|
||||
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
|
||||
),
|
||||
"RunQuery": grpc.unary_stream_rpc_method_handler(
|
||||
servicer.RunQuery,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RunQueryRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RunQueryResponse.SerializeToString,
|
||||
),
|
||||
"PartitionQuery": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.PartitionQuery,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.PartitionQueryRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.PartitionQueryResponse.SerializeToString,
|
||||
),
|
||||
"Write": grpc.stream_stream_rpc_method_handler(
|
||||
servicer.Write,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.WriteRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.WriteResponse.SerializeToString,
|
||||
),
|
||||
"Listen": grpc.stream_stream_rpc_method_handler(
|
||||
servicer.Listen,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListenRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListenResponse.SerializeToString,
|
||||
),
|
||||
"ListCollectionIds": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.ListCollectionIds,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.SerializeToString,
|
||||
),
|
||||
"BatchWrite": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.BatchWrite,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchWriteRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchWriteResponse.SerializeToString,
|
||||
),
|
||||
"CreateDocument": grpc.unary_unary_rpc_method_handler(
|
||||
servicer.CreateDocument,
|
||||
request_deserializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.FromString,
|
||||
response_serializer=google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.SerializeToString,
|
||||
),
|
||||
}
|
||||
generic_handler = grpc.method_handlers_generic_handler(
|
||||
"google.firestore.v1.Firestore", rpc_method_handlers
|
||||
)
|
||||
server.add_generic_rpc_handlers((generic_handler,))
|
||||
|
||||
|
||||
# This class is part of an EXPERIMENTAL API.
|
||||
class Firestore(object):
|
||||
"""Specification of the Firestore API.
|
||||
|
||||
The Cloud Firestore service.
|
||||
|
||||
Cloud Firestore is a fast, fully managed, serverless, cloud-native NoSQL
|
||||
document database that simplifies storing, syncing, and querying data for
|
||||
your mobile, web, and IoT apps at global scale. Its client libraries provide
|
||||
live synchronization and offline support, while its security features and
|
||||
integrations with Firebase and Google Cloud Platform (GCP) accelerate
|
||||
building truly serverless apps.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def GetDocument(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/GetDocument",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.GetDocumentRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def ListDocuments(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/ListDocuments",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListDocumentsRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListDocumentsResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def UpdateDocument(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/UpdateDocument",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.UpdateDocumentRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def DeleteDocument(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/DeleteDocument",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.DeleteDocumentRequest.SerializeToString,
|
||||
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def BatchGetDocuments(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_stream(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/BatchGetDocuments",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchGetDocumentsRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchGetDocumentsResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def BeginTransaction(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/BeginTransaction",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BeginTransactionRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BeginTransactionResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def Commit(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/Commit",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CommitRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CommitResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def Rollback(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/Rollback",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RollbackRequest.SerializeToString,
|
||||
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def RunQuery(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_stream(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/RunQuery",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RunQueryRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.RunQueryResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def PartitionQuery(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/PartitionQuery",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.PartitionQueryRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.PartitionQueryResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def Write(
|
||||
request_iterator,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.stream_stream(
|
||||
request_iterator,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/Write",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.WriteRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.WriteResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def Listen(
|
||||
request_iterator,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.stream_stream(
|
||||
request_iterator,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/Listen",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListenRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListenResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def ListCollectionIds(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/ListCollectionIds",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListCollectionIdsRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.ListCollectionIdsResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def BatchWrite(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/BatchWrite",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchWriteRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.BatchWriteResponse.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def CreateDocument(
|
||||
request,
|
||||
target,
|
||||
options=(),
|
||||
channel_credentials=None,
|
||||
call_credentials=None,
|
||||
compression=None,
|
||||
wait_for_ready=None,
|
||||
timeout=None,
|
||||
metadata=None,
|
||||
):
|
||||
return grpc.experimental.unary_unary(
|
||||
request,
|
||||
target,
|
||||
"/google.firestore.v1.Firestore/CreateDocument",
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_firestore__pb2.CreateDocumentRequest.SerializeToString,
|
||||
google_dot_cloud_dot_firestore__v1_dot_proto_dot_document__pb2.Document.FromString,
|
||||
options,
|
||||
channel_credentials,
|
||||
call_credentials,
|
||||
compression,
|
||||
wait_for_ready,
|
||||
timeout,
|
||||
metadata,
|
||||
)
|
|
@ -0,0 +1,267 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
import "google/firestore/v1/document.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
import "google/api/annotations.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "QueryProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// A Firestore query.
|
||||
message StructuredQuery {
|
||||
// A selection of a collection, such as `messages as m1`.
|
||||
message CollectionSelector {
|
||||
// The collection ID.
|
||||
// When set, selects only collections with this ID.
|
||||
string collection_id = 2;
|
||||
|
||||
// When false, selects only collections that are immediate children of
|
||||
// the `parent` specified in the containing `RunQueryRequest`.
|
||||
// When true, selects all descendant collections.
|
||||
bool all_descendants = 3;
|
||||
}
|
||||
|
||||
// A filter.
|
||||
message Filter {
|
||||
// The type of filter.
|
||||
oneof filter_type {
|
||||
// A composite filter.
|
||||
CompositeFilter composite_filter = 1;
|
||||
|
||||
// A filter on a document field.
|
||||
FieldFilter field_filter = 2;
|
||||
|
||||
// A filter that takes exactly one argument.
|
||||
UnaryFilter unary_filter = 3;
|
||||
}
|
||||
}
|
||||
|
||||
// A filter that merges multiple other filters using the given operator.
|
||||
message CompositeFilter {
|
||||
// A composite filter operator.
|
||||
enum Operator {
|
||||
// Unspecified. This value must not be used.
|
||||
OPERATOR_UNSPECIFIED = 0;
|
||||
|
||||
// The results are required to satisfy each of the combined filters.
|
||||
AND = 1;
|
||||
}
|
||||
|
||||
// The operator for combining multiple filters.
|
||||
Operator op = 1;
|
||||
|
||||
// The list of filters to combine.
|
||||
// Must contain at least one filter.
|
||||
repeated Filter filters = 2;
|
||||
}
|
||||
|
||||
// A filter on a specific field.
|
||||
message FieldFilter {
|
||||
// A field filter operator.
|
||||
enum Operator {
|
||||
// Unspecified. This value must not be used.
|
||||
OPERATOR_UNSPECIFIED = 0;
|
||||
|
||||
// The given `field` is less than the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
LESS_THAN = 1;
|
||||
|
||||
// The given `field` is less than or equal to the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
LESS_THAN_OR_EQUAL = 2;
|
||||
|
||||
// The given `field` is greater than the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
GREATER_THAN = 3;
|
||||
|
||||
// The given `field` is greater than or equal to the given `value`.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `field` come first in `order_by`.
|
||||
GREATER_THAN_OR_EQUAL = 4;
|
||||
|
||||
// The given `field` is equal to the given `value`.
|
||||
EQUAL = 5;
|
||||
|
||||
// The given `field` is an array that contains the given `value`.
|
||||
ARRAY_CONTAINS = 7;
|
||||
|
||||
// The given `field` is equal to at least one value in the given array.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `value` is a non-empty `ArrayValue` with at most 10 values.
|
||||
// * No other `IN`, `ARRAY_CONTAINS_ANY`, or `NOT_IN`.
|
||||
IN = 8;
|
||||
|
||||
// The given `field` is an array that contains any of the values in the
|
||||
// given array.
|
||||
//
|
||||
// Requires:
|
||||
//
|
||||
// * That `value` is a non-empty `ArrayValue` with at most 10 values.
|
||||
// * No other `IN`, `ARRAY_CONTAINS_ANY`, or `NOT_IN`.
|
||||
ARRAY_CONTAINS_ANY = 9;
|
||||
}
|
||||
|
||||
// The field to filter by.
|
||||
FieldReference field = 1;
|
||||
|
||||
// The operator to filter by.
|
||||
Operator op = 2;
|
||||
|
||||
// The value to compare to.
|
||||
Value value = 3;
|
||||
}
|
||||
|
||||
// A filter with a single operand.
|
||||
message UnaryFilter {
|
||||
// A unary operator.
|
||||
enum Operator {
|
||||
// Unspecified. This value must not be used.
|
||||
OPERATOR_UNSPECIFIED = 0;
|
||||
|
||||
// The given `field` is equal to `NaN`.
|
||||
IS_NAN = 2;
|
||||
|
||||
// The given `field` is equal to `NULL`.
|
||||
IS_NULL = 3;
|
||||
}
|
||||
|
||||
// The unary operator to apply.
|
||||
Operator op = 1;
|
||||
|
||||
// The argument to the filter.
|
||||
oneof operand_type {
|
||||
// The field to which to apply the operator.
|
||||
FieldReference field = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// A reference to a field, such as `max(messages.time) as max_time`.
|
||||
message FieldReference {
|
||||
string field_path = 2;
|
||||
}
|
||||
|
||||
// The projection of document's fields to return.
|
||||
message Projection {
|
||||
// The fields to return.
|
||||
//
|
||||
// If empty, all fields are returned. To only return the name
|
||||
// of the document, use `['__name__']`.
|
||||
repeated FieldReference fields = 2;
|
||||
}
|
||||
|
||||
// An order on a field.
|
||||
message Order {
|
||||
// The field to order by.
|
||||
FieldReference field = 1;
|
||||
|
||||
// The direction to order by. Defaults to `ASCENDING`.
|
||||
Direction direction = 2;
|
||||
}
|
||||
|
||||
// A sort direction.
|
||||
enum Direction {
|
||||
// Unspecified.
|
||||
DIRECTION_UNSPECIFIED = 0;
|
||||
|
||||
// Ascending.
|
||||
ASCENDING = 1;
|
||||
|
||||
// Descending.
|
||||
DESCENDING = 2;
|
||||
}
|
||||
|
||||
// The projection to return.
|
||||
Projection select = 1;
|
||||
|
||||
// The collections to query.
|
||||
repeated CollectionSelector from = 2;
|
||||
|
||||
// The filter to apply.
|
||||
Filter where = 3;
|
||||
|
||||
// The order to apply to the query results.
|
||||
//
|
||||
// Firestore guarantees a stable ordering through the following rules:
|
||||
//
|
||||
// * Any field required to appear in `order_by`, that is not already
|
||||
// specified in `order_by`, is appended to the order in field name order
|
||||
// by default.
|
||||
// * If an order on `__name__` is not specified, it is appended by default.
|
||||
//
|
||||
// Fields are appended with the same sort direction as the last order
|
||||
// specified, or 'ASCENDING' if no order was specified. For example:
|
||||
//
|
||||
// * `SELECT * FROM Foo ORDER BY A` becomes
|
||||
// `SELECT * FROM Foo ORDER BY A, __name__`
|
||||
// * `SELECT * FROM Foo ORDER BY A DESC` becomes
|
||||
// `SELECT * FROM Foo ORDER BY A DESC, __name__ DESC`
|
||||
// * `SELECT * FROM Foo WHERE A > 1` becomes
|
||||
// `SELECT * FROM Foo WHERE A > 1 ORDER BY A, __name__`
|
||||
repeated Order order_by = 4;
|
||||
|
||||
// A starting point for the query results.
|
||||
Cursor start_at = 7;
|
||||
|
||||
// A end point for the query results.
|
||||
Cursor end_at = 8;
|
||||
|
||||
// The number of results to skip.
|
||||
//
|
||||
// Applies before limit, but after all other constraints. Must be >= 0 if
|
||||
// specified.
|
||||
int32 offset = 6;
|
||||
|
||||
// The maximum number of results to return.
|
||||
//
|
||||
// Applies after all other constraints.
|
||||
// Must be >= 0 if specified.
|
||||
google.protobuf.Int32Value limit = 5;
|
||||
}
|
||||
|
||||
// A position in a query result set.
|
||||
message Cursor {
|
||||
// The values that represent a position, in the order they appear in
|
||||
// the order by clause of a query.
|
||||
//
|
||||
// Can contain fewer values than specified in the order by clause.
|
||||
repeated Value values = 1;
|
||||
|
||||
// If the position is just before or just after the given values, relative
|
||||
// to the sort order defined by the query.
|
||||
bool before = 2;
|
||||
}
|
1280
venv/Lib/site-packages/google/cloud/firestore_v1/proto/query_pb2.py
Normal file
1280
venv/Lib/site-packages/google/cloud/firestore_v1/proto/query_pb2.py
Normal file
File diff suppressed because it is too large
Load diff
|
@ -0,0 +1,3 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
File diff suppressed because one or more lines are too long
2208
venv/Lib/site-packages/google/cloud/firestore_v1/proto/tests_pb2.py
Normal file
2208
venv/Lib/site-packages/google/cloud/firestore_v1/proto/tests_pb2.py
Normal file
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,258 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.firestore.v1;
|
||||
|
||||
import "google/firestore/v1/common.proto";
|
||||
import "google/firestore/v1/document.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/api/annotations.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Firestore.V1";
|
||||
option go_package = "google.golang.org/genproto/googleapis/firestore/v1;firestore";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "WriteProto";
|
||||
option java_package = "com.google.firestore.v1";
|
||||
option objc_class_prefix = "GCFS";
|
||||
option php_namespace = "Google\\Cloud\\Firestore\\V1";
|
||||
option ruby_package = "Google::Cloud::Firestore::V1";
|
||||
|
||||
// A write on a document.
|
||||
message Write {
|
||||
// The operation to execute.
|
||||
oneof operation {
|
||||
// A document to write.
|
||||
Document update = 1;
|
||||
|
||||
// A document name to delete. In the format:
|
||||
// `projects/{project_id}/databases/{database_id}/documents/{document_path}`.
|
||||
string delete = 2;
|
||||
|
||||
// Applies a transformation to a document.
|
||||
DocumentTransform transform = 6;
|
||||
}
|
||||
|
||||
// The fields to update in this write.
|
||||
//
|
||||
// This field can be set only when the operation is `update`.
|
||||
// If the mask is not set for an `update` and the document exists, any
|
||||
// existing data will be overwritten.
|
||||
// If the mask is set and the document on the server has fields not covered by
|
||||
// the mask, they are left unchanged.
|
||||
// Fields referenced in the mask, but not present in the input document, are
|
||||
// deleted from the document on the server.
|
||||
// The field paths in this mask must not contain a reserved field name.
|
||||
DocumentMask update_mask = 3;
|
||||
|
||||
// The transforms to perform after update.
|
||||
//
|
||||
// This field can be set only when the operation is `update`. If present, this
|
||||
// write is equivalent to performing `update` and `transform` to the same
|
||||
// document atomically and in order.
|
||||
repeated DocumentTransform.FieldTransform update_transforms = 7;
|
||||
|
||||
// An optional precondition on the document.
|
||||
//
|
||||
// The write will fail if this is set and not met by the target document.
|
||||
Precondition current_document = 4;
|
||||
}
|
||||
|
||||
// A transformation of a document.
|
||||
message DocumentTransform {
|
||||
// A transformation of a field of the document.
|
||||
message FieldTransform {
|
||||
// A value that is calculated by the server.
|
||||
enum ServerValue {
|
||||
// Unspecified. This value must not be used.
|
||||
SERVER_VALUE_UNSPECIFIED = 0;
|
||||
|
||||
// The time at which the server processed the request, with millisecond
|
||||
// precision.
|
||||
REQUEST_TIME = 1;
|
||||
}
|
||||
|
||||
// The path of the field. See [Document.fields][google.firestore.v1.Document.fields] for the field path syntax
|
||||
// reference.
|
||||
string field_path = 1;
|
||||
|
||||
// The transformation to apply on the field.
|
||||
oneof transform_type {
|
||||
// Sets the field to the given server value.
|
||||
ServerValue set_to_server_value = 2;
|
||||
|
||||
// Adds the given value to the field's current value.
|
||||
//
|
||||
// This must be an integer or a double value.
|
||||
// If the field is not an integer or double, or if the field does not yet
|
||||
// exist, the transformation will set the field to the given value.
|
||||
// If either of the given value or the current field value are doubles,
|
||||
// both values will be interpreted as doubles. Double arithmetic and
|
||||
// representation of double values follow IEEE 754 semantics.
|
||||
// If there is positive/negative integer overflow, the field is resolved
|
||||
// to the largest magnitude positive/negative integer.
|
||||
Value increment = 3;
|
||||
|
||||
// Sets the field to the maximum of its current value and the given value.
|
||||
//
|
||||
// This must be an integer or a double value.
|
||||
// If the field is not an integer or double, or if the field does not yet
|
||||
// exist, the transformation will set the field to the given value.
|
||||
// If a maximum operation is applied where the field and the input value
|
||||
// are of mixed types (that is - one is an integer and one is a double)
|
||||
// the field takes on the type of the larger operand. If the operands are
|
||||
// equivalent (e.g. 3 and 3.0), the field does not change.
|
||||
// 0, 0.0, and -0.0 are all zero. The maximum of a zero stored value and
|
||||
// zero input value is always the stored value.
|
||||
// The maximum of any numeric value x and NaN is NaN.
|
||||
Value maximum = 4;
|
||||
|
||||
// Sets the field to the minimum of its current value and the given value.
|
||||
//
|
||||
// This must be an integer or a double value.
|
||||
// If the field is not an integer or double, or if the field does not yet
|
||||
// exist, the transformation will set the field to the input value.
|
||||
// If a minimum operation is applied where the field and the input value
|
||||
// are of mixed types (that is - one is an integer and one is a double)
|
||||
// the field takes on the type of the smaller operand. If the operands are
|
||||
// equivalent (e.g. 3 and 3.0), the field does not change.
|
||||
// 0, 0.0, and -0.0 are all zero. The minimum of a zero stored value and
|
||||
// zero input value is always the stored value.
|
||||
// The minimum of any numeric value x and NaN is NaN.
|
||||
Value minimum = 5;
|
||||
|
||||
// Append the given elements in order if they are not already present in
|
||||
// the current field value.
|
||||
// If the field is not an array, or if the field does not yet exist, it is
|
||||
// first set to the empty array.
|
||||
//
|
||||
// Equivalent numbers of different types (e.g. 3L and 3.0) are
|
||||
// considered equal when checking if a value is missing.
|
||||
// NaN is equal to NaN, and Null is equal to Null.
|
||||
// If the input contains multiple equivalent values, only the first will
|
||||
// be considered.
|
||||
//
|
||||
// The corresponding transform_result will be the null value.
|
||||
ArrayValue append_missing_elements = 6;
|
||||
|
||||
// Remove all of the given elements from the array in the field.
|
||||
// If the field is not an array, or if the field does not yet exist, it is
|
||||
// set to the empty array.
|
||||
//
|
||||
// Equivalent numbers of the different types (e.g. 3L and 3.0) are
|
||||
// considered equal when deciding whether an element should be removed.
|
||||
// NaN is equal to NaN, and Null is equal to Null.
|
||||
// This will remove all equivalent values if there are duplicates.
|
||||
//
|
||||
// The corresponding transform_result will be the null value.
|
||||
ArrayValue remove_all_from_array = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// The name of the document to transform.
|
||||
string document = 1;
|
||||
|
||||
// The list of transformations to apply to the fields of the document, in
|
||||
// order.
|
||||
// This must not be empty.
|
||||
repeated FieldTransform field_transforms = 2;
|
||||
}
|
||||
|
||||
// The result of applying a write.
|
||||
message WriteResult {
|
||||
// The last update time of the document after applying the write. Not set
|
||||
// after a `delete`.
|
||||
//
|
||||
// If the write did not actually change the document, this will be the
|
||||
// previous update_time.
|
||||
google.protobuf.Timestamp update_time = 1;
|
||||
|
||||
// The results of applying each [DocumentTransform.FieldTransform][google.firestore.v1.DocumentTransform.FieldTransform], in the
|
||||
// same order.
|
||||
repeated Value transform_results = 2;
|
||||
}
|
||||
|
||||
// A [Document][google.firestore.v1.Document] has changed.
|
||||
//
|
||||
// May be the result of multiple [writes][google.firestore.v1.Write], including deletes, that
|
||||
// ultimately resulted in a new value for the [Document][google.firestore.v1.Document].
|
||||
//
|
||||
// Multiple [DocumentChange][google.firestore.v1.DocumentChange] messages may be returned for the same logical
|
||||
// change, if multiple targets are affected.
|
||||
message DocumentChange {
|
||||
// The new state of the [Document][google.firestore.v1.Document].
|
||||
//
|
||||
// If `mask` is set, contains only fields that were updated or added.
|
||||
Document document = 1;
|
||||
|
||||
// A set of target IDs of targets that match this document.
|
||||
repeated int32 target_ids = 5;
|
||||
|
||||
// A set of target IDs for targets that no longer match this document.
|
||||
repeated int32 removed_target_ids = 6;
|
||||
}
|
||||
|
||||
// A [Document][google.firestore.v1.Document] has been deleted.
|
||||
//
|
||||
// May be the result of multiple [writes][google.firestore.v1.Write], including updates, the
|
||||
// last of which deleted the [Document][google.firestore.v1.Document].
|
||||
//
|
||||
// Multiple [DocumentDelete][google.firestore.v1.DocumentDelete] messages may be returned for the same logical
|
||||
// delete, if multiple targets are affected.
|
||||
message DocumentDelete {
|
||||
// The resource name of the [Document][google.firestore.v1.Document] that was deleted.
|
||||
string document = 1;
|
||||
|
||||
// A set of target IDs for targets that previously matched this entity.
|
||||
repeated int32 removed_target_ids = 6;
|
||||
|
||||
// The read timestamp at which the delete was observed.
|
||||
//
|
||||
// Greater or equal to the `commit_time` of the delete.
|
||||
google.protobuf.Timestamp read_time = 4;
|
||||
}
|
||||
|
||||
// A [Document][google.firestore.v1.Document] has been removed from the view of the targets.
|
||||
//
|
||||
// Sent if the document is no longer relevant to a target and is out of view.
|
||||
// Can be sent instead of a DocumentDelete or a DocumentChange if the server
|
||||
// can not send the new value of the document.
|
||||
//
|
||||
// Multiple [DocumentRemove][google.firestore.v1.DocumentRemove] messages may be returned for the same logical
|
||||
// write or delete, if multiple targets are affected.
|
||||
message DocumentRemove {
|
||||
// The resource name of the [Document][google.firestore.v1.Document] that has gone out of view.
|
||||
string document = 1;
|
||||
|
||||
// A set of target IDs for targets that previously matched this document.
|
||||
repeated int32 removed_target_ids = 2;
|
||||
|
||||
// The read timestamp at which the remove was observed.
|
||||
//
|
||||
// Greater or equal to the `commit_time` of the change/delete/remove.
|
||||
google.protobuf.Timestamp read_time = 4;
|
||||
}
|
||||
|
||||
// A digest of all the documents that match a given target.
|
||||
message ExistenceFilter {
|
||||
// The target ID to which this filter applies.
|
||||
int32 target_id = 1;
|
||||
|
||||
// The total count of documents that match [target_id][google.firestore.v1.ExistenceFilter.target_id].
|
||||
//
|
||||
// If different from the count of documents in the client that match, the
|
||||
// client must manually determine which documents no longer match the target.
|
||||
int32 count = 2;
|
||||
}
|
1193
venv/Lib/site-packages/google/cloud/firestore_v1/proto/write_pb2.py
Normal file
1193
venv/Lib/site-packages/google/cloud/firestore_v1/proto/write_pb2.py
Normal file
File diff suppressed because it is too large
Load diff
|
@ -0,0 +1,3 @@
|
|||
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
|
||||
"""Client and server classes corresponding to protobuf-defined services."""
|
||||
import grpc
|
1129
venv/Lib/site-packages/google/cloud/firestore_v1/query.py
Normal file
1129
venv/Lib/site-packages/google/cloud/firestore_v1/query.py
Normal file
File diff suppressed because it is too large
Load diff
442
venv/Lib/site-packages/google/cloud/firestore_v1/transaction.py
Normal file
442
venv/Lib/site-packages/google/cloud/firestore_v1/transaction.py
Normal file
|
@ -0,0 +1,442 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Helpers for applying Google Cloud Firestore changes in a transaction."""
|
||||
|
||||
|
||||
import random
|
||||
import time
|
||||
|
||||
import six
|
||||
|
||||
from google.api_core import exceptions
|
||||
from google.cloud.firestore_v1 import batch
|
||||
from google.cloud.firestore_v1 import types
|
||||
from google.cloud.firestore_v1.document import DocumentReference
|
||||
from google.cloud.firestore_v1.query import Query
|
||||
|
||||
|
||||
MAX_ATTEMPTS = 5
|
||||
"""int: Default number of transaction attempts (with retries)."""
|
||||
_CANT_BEGIN = "The transaction has already begun. Current transaction ID: {!r}."
|
||||
_MISSING_ID_TEMPLATE = "The transaction has no transaction ID, so it cannot be {}."
|
||||
_CANT_ROLLBACK = _MISSING_ID_TEMPLATE.format("rolled back")
|
||||
_CANT_COMMIT = _MISSING_ID_TEMPLATE.format("committed")
|
||||
_WRITE_READ_ONLY = "Cannot perform write operation in read-only transaction."
|
||||
_INITIAL_SLEEP = 1.0
|
||||
"""float: Initial "max" for sleep interval. To be used in :func:`_sleep`."""
|
||||
_MAX_SLEEP = 30.0
|
||||
"""float: Eventual "max" sleep time. To be used in :func:`_sleep`."""
|
||||
_MULTIPLIER = 2.0
|
||||
"""float: Multiplier for exponential backoff. To be used in :func:`_sleep`."""
|
||||
_EXCEED_ATTEMPTS_TEMPLATE = "Failed to commit transaction in {:d} attempts."
|
||||
_CANT_RETRY_READ_ONLY = "Only read-write transactions can be retried."
|
||||
|
||||
|
||||
class Transaction(batch.WriteBatch):
|
||||
"""Accumulate read-and-write operations to be sent in a transaction.
|
||||
|
||||
Args:
|
||||
client (:class:`~google.cloud.firestore_v1.client.Client`):
|
||||
The client that created this transaction.
|
||||
max_attempts (Optional[int]): The maximum number of attempts for
|
||||
the transaction (i.e. allowing retries). Defaults to
|
||||
:attr:`~google.cloud.firestore_v1.transaction.MAX_ATTEMPTS`.
|
||||
read_only (Optional[bool]): Flag indicating if the transaction
|
||||
should be read-only or should allow writes. Defaults to
|
||||
:data:`False`.
|
||||
"""
|
||||
|
||||
def __init__(self, client, max_attempts=MAX_ATTEMPTS, read_only=False):
|
||||
super(Transaction, self).__init__(client)
|
||||
self._max_attempts = max_attempts
|
||||
self._read_only = read_only
|
||||
self._id = None
|
||||
|
||||
def _add_write_pbs(self, write_pbs):
|
||||
"""Add `Write`` protobufs to this transaction.
|
||||
|
||||
Args:
|
||||
write_pbs (List[google.cloud.proto.firestore.v1.\
|
||||
write_pb2.Write]): A list of write protobufs to be added.
|
||||
|
||||
Raises:
|
||||
ValueError: If this transaction is read-only.
|
||||
"""
|
||||
if self._read_only:
|
||||
raise ValueError(_WRITE_READ_ONLY)
|
||||
|
||||
super(Transaction, self)._add_write_pbs(write_pbs)
|
||||
|
||||
def _options_protobuf(self, retry_id):
|
||||
"""Convert the current object to protobuf.
|
||||
|
||||
The ``retry_id`` value is used when retrying a transaction that
|
||||
failed (e.g. due to contention). It is intended to be the "first"
|
||||
transaction that failed (i.e. if multiple retries are needed).
|
||||
|
||||
Args:
|
||||
retry_id (Union[bytes, NoneType]): Transaction ID of a transaction
|
||||
to be retried.
|
||||
|
||||
Returns:
|
||||
Optional[google.cloud.firestore_v1.types.TransactionOptions]:
|
||||
The protobuf ``TransactionOptions`` if ``read_only==True`` or if
|
||||
there is a transaction ID to be retried, else :data:`None`.
|
||||
|
||||
Raises:
|
||||
ValueError: If ``retry_id`` is not :data:`None` but the
|
||||
transaction is read-only.
|
||||
"""
|
||||
if retry_id is not None:
|
||||
if self._read_only:
|
||||
raise ValueError(_CANT_RETRY_READ_ONLY)
|
||||
|
||||
return types.TransactionOptions(
|
||||
read_write=types.TransactionOptions.ReadWrite(
|
||||
retry_transaction=retry_id
|
||||
)
|
||||
)
|
||||
elif self._read_only:
|
||||
return types.TransactionOptions(
|
||||
read_only=types.TransactionOptions.ReadOnly()
|
||||
)
|
||||
else:
|
||||
return None
|
||||
|
||||
@property
|
||||
def in_progress(self):
|
||||
"""Determine if this transaction has already begun.
|
||||
|
||||
Returns:
|
||||
bool: Indicates if the transaction has started.
|
||||
"""
|
||||
return self._id is not None
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
"""Get the current transaction ID.
|
||||
|
||||
Returns:
|
||||
Optional[bytes]: The transaction ID (or :data:`None` if the
|
||||
current transaction is not in progress).
|
||||
"""
|
||||
return self._id
|
||||
|
||||
def _begin(self, retry_id=None):
|
||||
"""Begin the transaction.
|
||||
|
||||
Args:
|
||||
retry_id (Optional[bytes]): Transaction ID of a transaction to be
|
||||
retried.
|
||||
|
||||
Raises:
|
||||
ValueError: If the current transaction has already begun.
|
||||
"""
|
||||
if self.in_progress:
|
||||
msg = _CANT_BEGIN.format(self._id)
|
||||
raise ValueError(msg)
|
||||
|
||||
transaction_response = self._client._firestore_api.begin_transaction(
|
||||
self._client._database_string,
|
||||
options_=self._options_protobuf(retry_id),
|
||||
metadata=self._client._rpc_metadata,
|
||||
)
|
||||
self._id = transaction_response.transaction
|
||||
|
||||
def _clean_up(self):
|
||||
"""Clean up the instance after :meth:`_rollback`` or :meth:`_commit``.
|
||||
|
||||
This intended to occur on success or failure of the associated RPCs.
|
||||
"""
|
||||
self._write_pbs = []
|
||||
self._id = None
|
||||
|
||||
def _rollback(self):
|
||||
"""Roll back the transaction.
|
||||
|
||||
Raises:
|
||||
ValueError: If no transaction is in progress.
|
||||
"""
|
||||
if not self.in_progress:
|
||||
raise ValueError(_CANT_ROLLBACK)
|
||||
|
||||
try:
|
||||
# NOTE: The response is just ``google.protobuf.Empty``.
|
||||
self._client._firestore_api.rollback(
|
||||
self._client._database_string,
|
||||
self._id,
|
||||
metadata=self._client._rpc_metadata,
|
||||
)
|
||||
finally:
|
||||
self._clean_up()
|
||||
|
||||
def _commit(self):
|
||||
"""Transactionally commit the changes accumulated.
|
||||
|
||||
Returns:
|
||||
List[:class:`google.cloud.proto.firestore.v1.write_pb2.WriteResult`, ...]:
|
||||
The write results corresponding to the changes committed, returned
|
||||
in the same order as the changes were applied to this transaction.
|
||||
A write result contains an ``update_time`` field.
|
||||
|
||||
Raises:
|
||||
ValueError: If no transaction is in progress.
|
||||
"""
|
||||
if not self.in_progress:
|
||||
raise ValueError(_CANT_COMMIT)
|
||||
|
||||
commit_response = _commit_with_retry(self._client, self._write_pbs, self._id)
|
||||
|
||||
self._clean_up()
|
||||
return list(commit_response.write_results)
|
||||
|
||||
def get_all(self, references):
|
||||
"""Retrieves multiple documents from Firestore.
|
||||
|
||||
Args:
|
||||
references (List[.DocumentReference, ...]): Iterable of document
|
||||
references to be retrieved.
|
||||
|
||||
Yields:
|
||||
.DocumentSnapshot: The next document snapshot that fulfills the
|
||||
query, or :data:`None` if the document does not exist.
|
||||
"""
|
||||
return self._client.get_all(references, transaction=self)
|
||||
|
||||
def get(self, ref_or_query):
|
||||
"""
|
||||
Retrieve a document or a query result from the database.
|
||||
Args:
|
||||
ref_or_query The document references or query object to return.
|
||||
Yields:
|
||||
.DocumentSnapshot: The next document snapshot that fulfills the
|
||||
query, or :data:`None` if the document does not exist.
|
||||
"""
|
||||
if isinstance(ref_or_query, DocumentReference):
|
||||
return self._client.get_all([ref_or_query], transaction=self)
|
||||
elif isinstance(ref_or_query, Query):
|
||||
return ref_or_query.stream(transaction=self)
|
||||
else:
|
||||
raise ValueError(
|
||||
'Value for argument "ref_or_query" must be a DocumentReference or a Query.'
|
||||
)
|
||||
|
||||
|
||||
class _Transactional(object):
|
||||
"""Provide a callable object to use as a transactional decorater.
|
||||
|
||||
This is surfaced via
|
||||
:func:`~google.cloud.firestore_v1.transaction.transactional`.
|
||||
|
||||
Args:
|
||||
to_wrap (Callable[[:class:`~google.cloud.firestore_v1.transaction.Transaction`, ...], Any]):
|
||||
A callable that should be run (and retried) in a transaction.
|
||||
"""
|
||||
|
||||
def __init__(self, to_wrap):
|
||||
self.to_wrap = to_wrap
|
||||
self.current_id = None
|
||||
"""Optional[bytes]: The current transaction ID."""
|
||||
self.retry_id = None
|
||||
"""Optional[bytes]: The ID of the first attempted transaction."""
|
||||
|
||||
def _reset(self):
|
||||
"""Unset the transaction IDs."""
|
||||
self.current_id = None
|
||||
self.retry_id = None
|
||||
|
||||
def _pre_commit(self, transaction, *args, **kwargs):
|
||||
"""Begin transaction and call the wrapped callable.
|
||||
|
||||
If the callable raises an exception, the transaction will be rolled
|
||||
back. If not, the transaction will be "ready" for ``Commit`` (i.e.
|
||||
it will have staged writes).
|
||||
|
||||
Args:
|
||||
transaction
|
||||
(:class:`~google.cloud.firestore_v1.transaction.Transaction`):
|
||||
A transaction to execute the callable within.
|
||||
args (Tuple[Any, ...]): The extra positional arguments to pass
|
||||
along to the wrapped callable.
|
||||
kwargs (Dict[str, Any]): The extra keyword arguments to pass
|
||||
along to the wrapped callable.
|
||||
|
||||
Returns:
|
||||
Any: result of the wrapped callable.
|
||||
|
||||
Raises:
|
||||
Exception: Any failure caused by ``to_wrap``.
|
||||
"""
|
||||
# Force the ``transaction`` to be not "in progress".
|
||||
transaction._clean_up()
|
||||
transaction._begin(retry_id=self.retry_id)
|
||||
|
||||
# Update the stored transaction IDs.
|
||||
self.current_id = transaction._id
|
||||
if self.retry_id is None:
|
||||
self.retry_id = self.current_id
|
||||
try:
|
||||
return self.to_wrap(transaction, *args, **kwargs)
|
||||
except: # noqa
|
||||
# NOTE: If ``rollback`` fails this will lose the information
|
||||
# from the original failure.
|
||||
transaction._rollback()
|
||||
raise
|
||||
|
||||
def _maybe_commit(self, transaction):
|
||||
"""Try to commit the transaction.
|
||||
|
||||
If the transaction is read-write and the ``Commit`` fails with the
|
||||
``ABORTED`` status code, it will be retried. Any other failure will
|
||||
not be caught.
|
||||
|
||||
Args:
|
||||
transaction
|
||||
(:class:`~google.cloud.firestore_v1.transaction.Transaction`):
|
||||
The transaction to be ``Commit``-ed.
|
||||
|
||||
Returns:
|
||||
bool: Indicating if the commit succeeded.
|
||||
"""
|
||||
try:
|
||||
transaction._commit()
|
||||
return True
|
||||
except exceptions.GoogleAPICallError as exc:
|
||||
if transaction._read_only:
|
||||
raise
|
||||
|
||||
if isinstance(exc, exceptions.Aborted):
|
||||
# If a read-write transaction returns ABORTED, retry.
|
||||
return False
|
||||
else:
|
||||
raise
|
||||
|
||||
def __call__(self, transaction, *args, **kwargs):
|
||||
"""Execute the wrapped callable within a transaction.
|
||||
|
||||
Args:
|
||||
transaction
|
||||
(:class:`~google.cloud.firestore_v1.transaction.Transaction`):
|
||||
A transaction to execute the callable within.
|
||||
args (Tuple[Any, ...]): The extra positional arguments to pass
|
||||
along to the wrapped callable.
|
||||
kwargs (Dict[str, Any]): The extra keyword arguments to pass
|
||||
along to the wrapped callable.
|
||||
|
||||
Returns:
|
||||
Any: The result of the wrapped callable.
|
||||
|
||||
Raises:
|
||||
ValueError: If the transaction does not succeed in
|
||||
``max_attempts``.
|
||||
"""
|
||||
self._reset()
|
||||
|
||||
for attempt in six.moves.xrange(transaction._max_attempts):
|
||||
result = self._pre_commit(transaction, *args, **kwargs)
|
||||
succeeded = self._maybe_commit(transaction)
|
||||
if succeeded:
|
||||
return result
|
||||
|
||||
# Subsequent requests will use the failed transaction ID as part of
|
||||
# the ``BeginTransactionRequest`` when restarting this transaction
|
||||
# (via ``options.retry_transaction``). This preserves the "spot in
|
||||
# line" of the transaction, so exponential backoff is not required
|
||||
# in this case.
|
||||
|
||||
transaction._rollback()
|
||||
msg = _EXCEED_ATTEMPTS_TEMPLATE.format(transaction._max_attempts)
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
def transactional(to_wrap):
|
||||
"""Decorate a callable so that it runs in a transaction.
|
||||
|
||||
Args:
|
||||
to_wrap
|
||||
(Callable[[:class:`~google.cloud.firestore_v1.transaction.Transaction`, ...], Any]):
|
||||
A callable that should be run (and retried) in a transaction.
|
||||
|
||||
Returns:
|
||||
Callable[[:class:`~google.cloud.firestore_v1.transaction.Transaction`, ...], Any]:
|
||||
the wrapped callable.
|
||||
"""
|
||||
return _Transactional(to_wrap)
|
||||
|
||||
|
||||
def _commit_with_retry(client, write_pbs, transaction_id):
|
||||
"""Call ``Commit`` on the GAPIC client with retry / sleep.
|
||||
|
||||
Retries the ``Commit`` RPC on Unavailable. Usually this RPC-level
|
||||
retry is handled by the underlying GAPICd client, but in this case it
|
||||
doesn't because ``Commit`` is not always idempotent. But here we know it
|
||||
is "idempotent"-like because it has a transaction ID. We also need to do
|
||||
our own retry to special-case the ``INVALID_ARGUMENT`` error.
|
||||
|
||||
Args:
|
||||
client (:class:`~google.cloud.firestore_v1.client.Client`):
|
||||
A client with GAPIC client and configuration details.
|
||||
write_pbs (List[:class:`google.cloud.proto.firestore.v1.write_pb2.Write`, ...]):
|
||||
A ``Write`` protobuf instance to be committed.
|
||||
transaction_id (bytes):
|
||||
ID of an existing transaction that this commit will run in.
|
||||
|
||||
Returns:
|
||||
:class:`google.cloud.firestore_v1.types.CommitResponse`:
|
||||
The protobuf response from ``Commit``.
|
||||
|
||||
Raises:
|
||||
~google.api_core.exceptions.GoogleAPICallError: If a non-retryable
|
||||
exception is encountered.
|
||||
"""
|
||||
current_sleep = _INITIAL_SLEEP
|
||||
while True:
|
||||
try:
|
||||
return client._firestore_api.commit(
|
||||
client._database_string,
|
||||
write_pbs,
|
||||
transaction=transaction_id,
|
||||
metadata=client._rpc_metadata,
|
||||
)
|
||||
except exceptions.ServiceUnavailable:
|
||||
# Retry
|
||||
pass
|
||||
|
||||
current_sleep = _sleep(current_sleep)
|
||||
|
||||
|
||||
def _sleep(current_sleep, max_sleep=_MAX_SLEEP, multiplier=_MULTIPLIER):
|
||||
"""Sleep and produce a new sleep time.
|
||||
|
||||
.. _Exponential Backoff And Jitter: https://www.awsarchitectureblog.com/\
|
||||
2015/03/backoff.html
|
||||
|
||||
Select a duration between zero and ``current_sleep``. It might seem
|
||||
counterintuitive to have so much jitter, but
|
||||
`Exponential Backoff And Jitter`_ argues that "full jitter" is
|
||||
the best strategy.
|
||||
|
||||
Args:
|
||||
current_sleep (float): The current "max" for sleep interval.
|
||||
max_sleep (Optional[float]): Eventual "max" sleep time
|
||||
multiplier (Optional[float]): Multiplier for exponential backoff.
|
||||
|
||||
Returns:
|
||||
float: Newly doubled ``current_sleep`` or ``max_sleep`` (whichever
|
||||
is smaller)
|
||||
"""
|
||||
actual_sleep = random.uniform(0.0, current_sleep)
|
||||
time.sleep(actual_sleep)
|
||||
return min(multiplier * current_sleep, max_sleep)
|
151
venv/Lib/site-packages/google/cloud/firestore_v1/transforms.py
Normal file
151
venv/Lib/site-packages/google/cloud/firestore_v1/transforms.py
Normal file
|
@ -0,0 +1,151 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Helpful constants to use for Google Cloud Firestore."""
|
||||
|
||||
|
||||
class Sentinel(object):
|
||||
"""Sentinel objects used to signal special handling."""
|
||||
|
||||
__slots__ = ("description",)
|
||||
|
||||
def __init__(self, description):
|
||||
self.description = description
|
||||
|
||||
def __repr__(self):
|
||||
return "Sentinel: {}".format(self.description)
|
||||
|
||||
|
||||
DELETE_FIELD = Sentinel("Value used to delete a field in a document.")
|
||||
|
||||
|
||||
SERVER_TIMESTAMP = Sentinel(
|
||||
"Value used to set a document field to the server timestamp."
|
||||
)
|
||||
|
||||
|
||||
class _ValueList(object):
|
||||
"""Read-only list of values.
|
||||
|
||||
Args:
|
||||
values (List | Tuple): values held in the helper.
|
||||
"""
|
||||
|
||||
slots = ("_values",)
|
||||
|
||||
def __init__(self, values):
|
||||
if not isinstance(values, (list, tuple)):
|
||||
raise ValueError("'values' must be a list or tuple.")
|
||||
|
||||
if len(values) == 0:
|
||||
raise ValueError("'values' must be non-empty.")
|
||||
|
||||
self._values = list(values)
|
||||
|
||||
def __eq__(self, other):
|
||||
if not isinstance(other, self.__class__):
|
||||
return NotImplemented
|
||||
return self._values == other._values
|
||||
|
||||
@property
|
||||
def values(self):
|
||||
"""Values to append.
|
||||
|
||||
Returns (List):
|
||||
values to be appended by the transform.
|
||||
"""
|
||||
return self._values
|
||||
|
||||
|
||||
class ArrayUnion(_ValueList):
|
||||
"""Field transform: appends missing values to an array field.
|
||||
|
||||
See:
|
||||
https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.append_missing_elements
|
||||
|
||||
Args:
|
||||
values (List | Tuple): values to append.
|
||||
"""
|
||||
|
||||
|
||||
class ArrayRemove(_ValueList):
|
||||
"""Field transform: remove values from an array field.
|
||||
|
||||
See:
|
||||
https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.remove_all_from_array
|
||||
|
||||
Args:
|
||||
values (List | Tuple): values to remove.
|
||||
"""
|
||||
|
||||
|
||||
class _NumericValue(object):
    """Wrapper holding a single ``int`` or ``float`` for numeric transforms.

    Args:
        value (int | float): value held in the helper.

    Raises:
        ValueError: if ``value`` is neither an int nor a float.
    """

    def __init__(self, value):
        # NOTE: bool is a subclass of int and is therefore accepted here,
        # matching the original behavior.
        if not isinstance(value, (int, float)):
            raise ValueError("Pass an integer / float value.")

        self._value = value

    @property
    def value(self):
        """int | float: the value supplied at construction time."""
        return self._value

    def __eq__(self, other):
        """Equal when ``other`` is the same class and wraps an equal value."""
        if isinstance(other, self.__class__):
            return self._value == other._value
        # Defer to the other operand for cross-type comparisons.
        return NotImplemented
|
||||
|
||||
|
||||
class Increment(_NumericValue):
    """Field transform adding ``value`` to a numeric field on the server.

    See the ``increment`` field transform:
    https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.increment

    Args:
        value (int | float): amount by which the field is incremented.
    """
|
||||
|
||||
|
||||
class Maximum(_NumericValue):
    """Field transform setting a numeric field to the larger of its current
    value and ``value``.

    See the ``maximum`` field transform:
    https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.maximum

    Args:
        value (int | float): lower bound applied to the field.
    """
|
||||
|
||||
|
||||
class Minimum(_NumericValue):
    """Field transform setting a numeric field to the smaller of its current
    value and ``value``.

    See the ``minimum`` field transform:
    https://cloud.google.com/firestore/docs/reference/rpc/google.firestore.v1#google.firestore.v1.DocumentTransform.FieldTransform.FIELDS.google.firestore.v1.ArrayValue.google.firestore.v1.DocumentTransform.FieldTransform.minimum

    Args:
        value (int | float): upper bound applied to the field.
    """
|
63
venv/Lib/site-packages/google/cloud/firestore_v1/types.py
Normal file
63
venv/Lib/site-packages/google/cloud/firestore_v1/types.py
Normal file
|
@ -0,0 +1,63 @@
|
|||
# Copyright 2018 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
import sys
|
||||
|
||||
from google.api import http_pb2
|
||||
from google.protobuf import any_pb2
|
||||
from google.protobuf import descriptor_pb2
|
||||
from google.protobuf import empty_pb2
|
||||
from google.protobuf import struct_pb2
|
||||
from google.protobuf import timestamp_pb2
|
||||
from google.protobuf import wrappers_pb2
|
||||
from google.rpc import status_pb2
|
||||
from google.type import latlng_pb2
|
||||
|
||||
from google.api_core.protobuf_helpers import get_messages
|
||||
from google.cloud.firestore_v1.proto import common_pb2
|
||||
from google.cloud.firestore_v1.proto import document_pb2
|
||||
from google.cloud.firestore_v1.proto import firestore_pb2
|
||||
from google.cloud.firestore_v1.proto import query_pb2
|
||||
from google.cloud.firestore_v1.proto import write_pb2
|
||||
|
||||
|
||||
# Protobuf modules shared across Google APIs; their message classes are
# re-exported from this module unchanged.
_shared_modules = [
    http_pb2,
    any_pb2,
    descriptor_pb2,
    empty_pb2,
    struct_pb2,
    timestamp_pb2,
    wrappers_pb2,
    status_pb2,
    latlng_pb2,
]

# Firestore-specific protobuf modules; their messages are re-homed so they
# appear to live in this package.
_local_modules = [common_pb2, document_pb2, firestore_pb2, query_pb2, write_pb2]

names = []

# Hoist the module object once instead of looking it up per message.
_this_module = sys.modules[__name__]

for module in _shared_modules:
    for name, message in get_messages(module).items():
        setattr(_this_module, name, message)
        names.append(name)

for module in _local_modules:
    for name, message in get_messages(module).items():
        # Rebrand the message so repr()/pickling point at this module.
        message.__module__ = "google.cloud.firestore_v1.types"
        setattr(_this_module, name, message)
        names.append(name)

__all__ = tuple(sorted(names))
|
743
venv/Lib/site-packages/google/cloud/firestore_v1/watch.py
Normal file
743
venv/Lib/site-packages/google/cloud/firestore_v1/watch.py
Normal file
|
@ -0,0 +1,743 @@
|
|||
# Copyright 2017 Google LLC All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import collections
|
||||
import threading
|
||||
import datetime
|
||||
from enum import Enum
|
||||
import functools
|
||||
|
||||
import pytz
|
||||
|
||||
from google.api_core.bidi import ResumableBidiRpc
|
||||
from google.api_core.bidi import BackgroundConsumer
|
||||
from google.cloud.firestore_v1.proto import firestore_pb2
|
||||
from google.cloud.firestore_v1 import _helpers
|
||||
|
||||
from google.api_core import exceptions
|
||||
|
||||
import grpc
|
||||
|
||||
"""Python client for Google Cloud Firestore Watch."""
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
WATCH_TARGET_ID = 0x5079 # "Py"
|
||||
|
||||
GRPC_STATUS_CODE = {
|
||||
"OK": 0,
|
||||
"CANCELLED": 1,
|
||||
"UNKNOWN": 2,
|
||||
"INVALID_ARGUMENT": 3,
|
||||
"DEADLINE_EXCEEDED": 4,
|
||||
"NOT_FOUND": 5,
|
||||
"ALREADY_EXISTS": 6,
|
||||
"PERMISSION_DENIED": 7,
|
||||
"UNAUTHENTICATED": 16,
|
||||
"RESOURCE_EXHAUSTED": 8,
|
||||
"FAILED_PRECONDITION": 9,
|
||||
"ABORTED": 10,
|
||||
"OUT_OF_RANGE": 11,
|
||||
"UNIMPLEMENTED": 12,
|
||||
"INTERNAL": 13,
|
||||
"UNAVAILABLE": 14,
|
||||
"DATA_LOSS": 15,
|
||||
"DO_NOT_USE": -1,
|
||||
}
|
||||
_RPC_ERROR_THREAD_NAME = "Thread-OnRpcTerminated"
|
||||
_RECOVERABLE_STREAM_EXCEPTIONS = (
|
||||
exceptions.Aborted,
|
||||
exceptions.Cancelled,
|
||||
exceptions.Unknown,
|
||||
exceptions.DeadlineExceeded,
|
||||
exceptions.ResourceExhausted,
|
||||
exceptions.InternalServerError,
|
||||
exceptions.ServiceUnavailable,
|
||||
exceptions.Unauthenticated,
|
||||
)
|
||||
_TERMINATING_STREAM_EXCEPTIONS = (exceptions.Cancelled,)
|
||||
|
||||
DocTreeEntry = collections.namedtuple("DocTreeEntry", ["value", "index"])
|
||||
|
||||
|
||||
class WatchDocTree(object):
    """Insertion-indexed collection of document snapshots.

    ``insert`` and ``remove`` return a modified shallow copy rather than
    mutating the receiver in place.

    TODO: backed by a plain dict today; other implementations use an rbtree.
    The performance of this implementation should be investigated and may
    require switching the underlying data structure to an rbtree.
    """

    def __init__(self):
        self._dict = {}
        self._index = 0

    def keys(self):
        """Return the stored keys as a list."""
        return list(self._dict.keys())

    def _copy(self):
        """Return a shallow copy: entries are shared, the dict is not."""
        clone = WatchDocTree()
        clone._dict = self._dict.copy()
        clone._index = self._index
        return clone

    def insert(self, key, value):
        """Return a copy with ``key`` mapped to a fresh ``DocTreeEntry``."""
        clone = self._copy()
        clone._dict[key] = DocTreeEntry(value, clone._index)
        clone._index += 1
        return clone

    def find(self, key):
        """Return the ``DocTreeEntry`` for ``key`` (KeyError if absent)."""
        return self._dict[key]

    def remove(self, key):
        """Return a copy with ``key`` deleted (KeyError if absent)."""
        clone = self._copy()
        del clone._dict[key]
        return clone

    def __iter__(self):
        yield from self._dict

    def __len__(self):
        return len(self._dict)

    def __contains__(self, k):
        return k in self._dict
|
||||
|
||||
|
||||
class ChangeType(Enum):
    """Kind of change a document underwent within a snapshot."""

    ADDED = 1
    REMOVED = 2
    MODIFIED = 3
|
||||
|
||||
|
||||
class DocumentChange(object):
    """A single document change inside a snapshot diff."""

    def __init__(self, type, document, old_index, new_index):
        """Record a document change.

        Args:
            type (ChangeType): the kind of change.
            document (document.DocumentSnapshot): the affected document.
            old_index (int): position before the change (-1 for additions).
            new_index (int): position after the change (-1 for removals).
        """
        # TODO: spec indicated an isEqual param also
        self.type = type
        self.document = document
        self.old_index = old_index
        self.new_index = new_index
|
||||
|
||||
|
||||
class WatchResult(object):
    """Container pairing a snapshot with its document name and change type."""

    def __init__(self, snapshot, name, change_type):
        self.snapshot = snapshot
        self.name = name
        self.change_type = change_type
|
||||
|
||||
|
||||
def _maybe_wrap_exception(exception):
    """Translate a raw ``grpc.RpcError`` into a ``google.api_core`` exception.

    Non-gRPC exceptions are returned unchanged.
    """
    if not isinstance(exception, grpc.RpcError):
        return exception
    return exceptions.from_grpc_error(exception)
|
||||
|
||||
|
||||
def document_watch_comparator(doc1, doc2):
    """Comparator for single-document watches.

    A document watch only ever contains one document, so both arguments must
    be that same document and every comparison yields equality (0).
    """
    assert doc1 == doc2, "Document watches only support one document."
    return 0
|
||||
|
||||
|
||||
def _should_recover(exception):
    """Return True when the (wrapped) error is transient, i.e. the bidi
    stream should be transparently re-opened."""
    wrapped = _maybe_wrap_exception(exception)
    return isinstance(wrapped, _RECOVERABLE_STREAM_EXCEPTIONS)
|
||||
|
||||
|
||||
def _should_terminate(exception):
    """Return True when the (wrapped) error permanently ends the stream."""
    wrapped = _maybe_wrap_exception(exception)
    return isinstance(wrapped, _TERMINATING_STREAM_EXCEPTIONS)
|
||||
|
||||
|
||||
class Watch(object):
    """Streams watch ("listen") results for a document or query.

    Wraps a resumable bidirectional ``Listen`` RPC: a background consumer
    thread feeds each ``ListenResponse`` to :meth:`on_snapshot`, which
    accumulates per-document changes and invokes the user callback whenever
    the server signals a consistent snapshot state.
    """

    # Class-level seams so unit tests can substitute fakes ("FBO" = for the
    # benefit of).
    BackgroundConsumer = BackgroundConsumer  # FBO unit tests
    ResumableBidiRpc = ResumableBidiRpc  # FBO unit tests

    def __init__(
        self,
        document_reference,
        firestore,
        target,
        comparator,
        snapshot_callback,
        document_snapshot_cls,
        document_reference_cls,
        BackgroundConsumer=None,  # FBO unit testing
        ResumableBidiRpc=None,  # FBO unit testing
    ):
        """
        Args:
            document_reference: the document or query being watched.
            firestore: client used to issue the ``Listen`` RPC.
            target: target mapping sent in the initial ``ListenRequest``.
            comparator: ordering function for document snapshots.
            snapshot_callback: Callback method to process snapshots.
                Args:
                    docs (List(DocumentSnapshot)): A callback that returns the
                        ordered list of documents stored in this snapshot.
                    changes (List(str)): A callback that returns the list of
                        changed documents since the last snapshot delivered for
                        this watch.
                    read_time (string): The ISO 8601 time at which this
                        snapshot was obtained.

            document_snapshot_cls: instance of DocumentSnapshot
            document_reference_cls: instance of DocumentReference
            BackgroundConsumer: test seam overriding the class default.
            ResumableBidiRpc: test seam overriding the class default.
        """
        self._document_reference = document_reference
        self._firestore = firestore
        self._api = firestore._firestore_api
        self._targets = target
        self._comparator = comparator
        self.DocumentSnapshot = document_snapshot_cls
        self.DocumentReference = document_reference_cls
        self._snapshot_callback = snapshot_callback
        # Serializes close() so shutdown is idempotent across threads.
        self._closing = threading.Lock()
        self._closed = False

        self.resume_token = None

        # Deliberately NOT called: ResumableBidiRpc accepts a callable
        # initial_request, so a fresh request (carrying the latest resume
        # token) is built on every (re)open of the stream.
        rpc_request = self._get_rpc_request

        if ResumableBidiRpc is None:
            ResumableBidiRpc = self.ResumableBidiRpc  # FBO unit tests

        self._rpc = ResumableBidiRpc(
            self._api.transport.listen,
            should_recover=_should_recover,
            should_terminate=_should_terminate,
            initial_request=rpc_request,
            metadata=self._firestore._rpc_metadata,
        )

        self._rpc.add_done_callback(self._on_rpc_done)

        # Initialize state for on_snapshot
        # The sorted tree of QueryDocumentSnapshots as sent in the last
        # snapshot. We only look at the keys.
        self.doc_tree = WatchDocTree()

        # A map of document names to QueryDocumentSnapshots for the last sent
        # snapshot.
        self.doc_map = {}

        # The accumulates map of document changes (keyed by document name) for
        # the current snapshot.
        self.change_map = {}

        # The current state of the query results.
        self.current = False

        # We need this to track whether we've pushed an initial set of changes,
        # since we should push those even when there are no changes, if there
        # aren't docs.
        self.has_pushed = False

        # The server assigns and updates the resume token.
        if BackgroundConsumer is None:  # FBO unit tests
            BackgroundConsumer = self.BackgroundConsumer

        self._consumer = BackgroundConsumer(self._rpc, self.on_snapshot)
        self._consumer.start()

    def _get_rpc_request(self):
        """Build the initial ``ListenRequest``, resuming if a token exists."""
        if self.resume_token is not None:
            self._targets["resume_token"] = self.resume_token
        return firestore_pb2.ListenRequest(
            database=self._firestore._database_string, add_target=self._targets
        )

    @property
    def is_active(self):
        """bool: True if this manager is actively streaming.

        Note that ``False`` does not indicate this is complete shut down,
        just that it stopped getting new messages.
        """
        return self._consumer is not None and self._consumer.is_active

    def close(self, reason=None):
        """Stop consuming messages and shutdown all helper threads.

        This method is idempotent. Additional calls will have no effect.

        Args:
            reason (Any): The reason to close this. If None, this is considered
                an "intentional" shutdown.

        Raises:
            Exception: re-raises ``reason`` when it is an exception instance;
                otherwise raises ``RuntimeError(reason)`` for truthy reasons.
        """
        with self._closing:
            if self._closed:
                return

            # Stop consuming messages.
            if self.is_active:
                _LOGGER.debug("Stopping consumer.")
                self._consumer.stop()
            self._consumer = None

            self._rpc.close()
            self._rpc = None
            self._closed = True
            _LOGGER.debug("Finished stopping manager.")

            if reason:
                # Raise an exception if a reason is provided
                _LOGGER.debug("reason for closing: %s" % reason)
                if isinstance(reason, Exception):
                    raise reason
                raise RuntimeError(reason)

    def _on_rpc_done(self, future):
        """Triggered whenever the underlying RPC terminates without recovery.

        This is typically triggered from one of two threads: the background
        consumer thread (when calling ``recv()`` produces a non-recoverable
        error) or the grpc management thread (when cancelling the RPC).

        This method is *non-blocking*. It will start another thread to deal
        with shutting everything down. This is to prevent blocking in the
        background consumer and preventing it from being ``joined()``.
        """
        _LOGGER.info("RPC termination has signaled manager shutdown.")
        future = _maybe_wrap_exception(future)
        thread = threading.Thread(
            name=_RPC_ERROR_THREAD_NAME, target=self.close, kwargs={"reason": future}
        )
        thread.daemon = True
        thread.start()

    def unsubscribe(self):
        """Public alias for :meth:`close` (intentional shutdown)."""
        self.close()

    @classmethod
    def for_document(
        cls,
        document_ref,
        snapshot_callback,
        snapshot_class_instance,
        reference_class_instance,
    ):
        """
        Creates a watch snapshot listener for a document. snapshot_callback
        receives a DocumentChange object, but may also start to get
        targetChange and such soon

        Args:
            document_ref: Reference to Document
            snapshot_callback: callback to be called on snapshot
            snapshot_class_instance: instance of DocumentSnapshot to make
                snapshots with to pass to snapshot_callback
            reference_class_instance: instance of DocumentReference to make
                references

        """
        return cls(
            document_ref,
            document_ref._client,
            {
                "documents": {"documents": [document_ref._document_path]},
                "target_id": WATCH_TARGET_ID,
            },
            # A single-document watch needs no real ordering.
            document_watch_comparator,
            snapshot_callback,
            snapshot_class_instance,
            reference_class_instance,
        )

    @classmethod
    def for_query(
        cls, query, snapshot_callback, snapshot_class_instance, reference_class_instance
    ):
        """Create a watch snapshot listener for a query (see for_document)."""
        parent_path, _ = query._parent._parent_info()
        query_target = firestore_pb2.Target.QueryTarget(
            parent=parent_path, structured_query=query._to_protobuf()
        )

        return cls(
            query,
            query._client,
            {"query": query_target, "target_id": WATCH_TARGET_ID},
            query._comparator,
            snapshot_callback,
            snapshot_class_instance,
            reference_class_instance,
        )

    def _on_snapshot_target_change_no_change(self, proto):
        """Handle NO_CHANGE: push a snapshot when the state is consistent."""
        _LOGGER.debug("on_snapshot: target change: NO_CHANGE")
        change = proto.target_change

        no_target_ids = change.target_ids is None or len(change.target_ids) == 0
        if no_target_ids and change.read_time and self.current:
            # TargetChange.CURRENT followed by TargetChange.NO_CHANGE
            # signals a consistent state. Invoke the onSnapshot
            # callback as specified by the user.
            self.push(change.read_time, change.resume_token)

    def _on_snapshot_target_change_add(self, proto):
        """Handle ADD: sanity-check the server echoed our target ID."""
        _LOGGER.debug("on_snapshot: target change: ADD")
        target_id = proto.target_change.target_ids[0]
        if target_id != WATCH_TARGET_ID:
            raise RuntimeError("Unexpected target ID %s sent by server" % target_id)

    def _on_snapshot_target_change_remove(self, proto):
        """Handle REMOVE: the server dropped the target; surface the cause."""
        _LOGGER.debug("on_snapshot: target change: REMOVE")
        change = proto.target_change

        # Default to gRPC INTERNAL (13) when no cause is supplied.
        code = 13
        message = "internal error"
        if change.cause:
            code = change.cause.code
            message = change.cause.message

        message = "Error %s: %s" % (code, message)

        raise RuntimeError(message)

    def _on_snapshot_target_change_reset(self, proto):
        # Whatever changes have happened so far no longer matter.
        _LOGGER.debug("on_snapshot: target change: RESET")
        self._reset_docs()

    def _on_snapshot_target_change_current(self, proto):
        """Handle CURRENT: results are now up to date; next NO_CHANGE pushes."""
        _LOGGER.debug("on_snapshot: target change: CURRENT")
        self.current = True

    def on_snapshot(self, proto):
        """
        Called everytime there is a response from listen. Collect changes
        and 'push' the changes in a batch to the customer when we receive
        'current' from the listen response.

        Args:
            proto (`google.cloud.firestore_v1.types.ListenResponse`):
                a single response from the ``Listen`` stream.
        """
        TargetChange = firestore_pb2.TargetChange

        target_changetype_dispatch = {
            TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change,
            TargetChange.ADD: self._on_snapshot_target_change_add,
            TargetChange.REMOVE: self._on_snapshot_target_change_remove,
            TargetChange.RESET: self._on_snapshot_target_change_reset,
            TargetChange.CURRENT: self._on_snapshot_target_change_current,
        }

        # ListenResponse is a oneof: exactly one of these is populated; the
        # "" default makes the str(...) truthiness checks below select it.
        target_change = getattr(proto, "target_change", "")
        document_change = getattr(proto, "document_change", "")
        document_delete = getattr(proto, "document_delete", "")
        document_remove = getattr(proto, "document_remove", "")
        filter_ = getattr(proto, "filter", "")

        if str(target_change):
            target_change_type = target_change.target_change_type
            _LOGGER.debug("on_snapshot: target change: " + str(target_change_type))
            meth = target_changetype_dispatch.get(target_change_type)
            if meth is None:
                _LOGGER.info(
                    "on_snapshot: Unknown target change " + str(target_change_type)
                )
                self.close(
                    reason="Unknown target change type: %s " % str(target_change_type)
                )
            else:
                try:
                    meth(proto)
                except Exception as exc2:
                    _LOGGER.debug("meth(proto) exc: " + str(exc2))
                    raise

            # NOTE:
            # in other implementations, such as node, the backoff is reset here
            # in this version bidi rpc is just used and will control this.

        elif str(document_change):
            _LOGGER.debug("on_snapshot: document change")

            # No other target_ids can show up here, but we still need to see
            # if the targetId was in the added list or removed list.
            target_ids = document_change.target_ids or []
            removed_target_ids = document_change.removed_target_ids or []
            changed = False
            removed = False

            if WATCH_TARGET_ID in target_ids:
                changed = True

            if WATCH_TARGET_ID in removed_target_ids:
                removed = True

            if changed:
                _LOGGER.debug("on_snapshot: document change: CHANGED")

                # google.cloud.firestore_v1.types.Document
                document = document_change.document

                data = _helpers.decode_dict(document.fields, self._firestore)

                # Create a snapshot. As Document and Query objects can be
                # passed we need to get a Document Reference in a more manual
                # fashion than self._document_reference
                document_name = document.name
                db_str = self._firestore._database_string
                db_str_documents = db_str + "/documents/"
                if document_name.startswith(db_str_documents):
                    document_name = document_name[len(db_str_documents) :]

                document_ref = self._firestore.document(document_name)

                snapshot = self.DocumentSnapshot(
                    reference=document_ref,
                    data=data,
                    exists=True,
                    read_time=None,
                    create_time=document.create_time,
                    update_time=document.update_time,
                )
                self.change_map[document.name] = snapshot

            elif removed:
                _LOGGER.debug("on_snapshot: document change: REMOVED")
                document = document_change.document
                self.change_map[document.name] = ChangeType.REMOVED

        # NB: document_delete and document_remove (as far as we, the client,
        # are concerned) are functionally equivalent

        elif str(document_delete):
            _LOGGER.debug("on_snapshot: document change: DELETE")
            name = document_delete.document
            self.change_map[name] = ChangeType.REMOVED

        elif str(document_remove):
            _LOGGER.debug("on_snapshot: document change: REMOVE")
            name = document_remove.document
            self.change_map[name] = ChangeType.REMOVED

        elif filter_:
            _LOGGER.debug("on_snapshot: filter update")
            # Server-reported document count disagrees with ours: our view
            # is stale, so drop it.
            if filter_.count != self._current_size():
                # We need to remove all the current results.
                self._reset_docs()
                # The filter didn't match, so re-issue the query.
                # TODO: reset stream method?
                # self._reset_stream();

        elif proto is None:
            self.close()
        else:
            _LOGGER.debug("UNKNOWN TYPE. UHOH")
            self.close(reason=ValueError("Unknown listen response type: %s" % proto))

    def push(self, read_time, next_resume_token):
        """
        Assembles a new snapshot from the current set of changes and invokes
        the user's callback. Clears the current changes on completion.
        """
        deletes, adds, updates = Watch._extract_changes(
            self.doc_map, self.change_map, read_time
        )

        updated_tree, updated_map, appliedChanges = self._compute_snapshot(
            self.doc_tree, self.doc_map, deletes, adds, updates
        )

        # Always deliver the very first snapshot, even when it is empty.
        if not self.has_pushed or len(appliedChanges):
            # TODO: It is possible in the future we will have the tree order
            # on insert. For now, we sort here.
            key = functools.cmp_to_key(self._comparator)
            keys = sorted(updated_tree.keys(), key=key)

            self._snapshot_callback(
                keys,
                appliedChanges,
                # NOTE(review): only whole seconds are used; read_time.nanos
                # (sub-second precision) is dropped here.
                datetime.datetime.fromtimestamp(read_time.seconds, pytz.utc),
            )
            self.has_pushed = True

        self.doc_tree = updated_tree
        self.doc_map = updated_map
        self.change_map.clear()
        self.resume_token = next_resume_token

    @staticmethod
    def _extract_changes(doc_map, changes, read_time):
        """Split the accumulated change map into (deletes, adds, updates).

        Deletes are document names; adds/updates are snapshots (stamped with
        ``read_time`` when one is given). A REMOVED entry for an unknown
        document is silently dropped.
        """
        deletes = []
        adds = []
        updates = []

        for name, value in changes.items():
            if value == ChangeType.REMOVED:
                if name in doc_map:
                    deletes.append(name)
            elif name in doc_map:
                if read_time is not None:
                    value.read_time = read_time
                updates.append(value)
            else:
                if read_time is not None:
                    value.read_time = read_time
                adds.append(value)

        return (deletes, adds, updates)

    def _compute_snapshot(
        self, doc_tree, doc_map, delete_changes, add_changes, update_changes
    ):
        """Apply deletes/adds/updates to the tree+map, collecting the
        resulting ``DocumentChange`` events.

        Returns:
            (updated_tree, updated_map, appliedChanges) — note ``doc_map`` is
            mutated in place by the nested helpers (updated_map aliases it).
        """
        updated_tree = doc_tree
        updated_map = doc_map

        assert len(doc_tree) == len(doc_map), (
            "The document tree and document map should have the same "
            + "number of entries."
        )

        def delete_doc(name, updated_tree, updated_map):
            """
            Applies a document delete to the document tree and document map.
            Returns the corresponding DocumentChange event.
            """
            assert name in updated_map, "Document to delete does not exist"
            old_document = updated_map.get(name)
            # TODO: If a document doesn't exist this raises IndexError. Handle?
            existing = updated_tree.find(old_document)
            old_index = existing.index
            updated_tree = updated_tree.remove(old_document)
            del updated_map[name]
            return (
                DocumentChange(ChangeType.REMOVED, old_document, old_index, -1),
                updated_tree,
                updated_map,
            )

        def add_doc(new_document, updated_tree, updated_map):
            """
            Applies a document add to the document tree and the document map.
            Returns the corresponding DocumentChange event.
            """
            name = new_document.reference._document_path
            assert name not in updated_map, "Document to add already exists"
            updated_tree = updated_tree.insert(new_document, None)
            new_index = updated_tree.find(new_document).index
            updated_map[name] = new_document
            return (
                DocumentChange(ChangeType.ADDED, new_document, -1, new_index),
                updated_tree,
                updated_map,
            )

        def modify_doc(new_document, updated_tree, updated_map):
            """
            Applies a document modification to the document tree and the
            document map.
            Returns the DocumentChange event for successful modifications.
            """
            name = new_document.reference._document_path
            assert name in updated_map, "Document to modify does not exist"
            old_document = updated_map.get(name)
            # Only an update_time difference counts as a modification; a
            # modify is implemented as delete + add of the same name.
            if old_document.update_time != new_document.update_time:
                remove_change, updated_tree, updated_map = delete_doc(
                    name, updated_tree, updated_map
                )
                add_change, updated_tree, updated_map = add_doc(
                    new_document, updated_tree, updated_map
                )
                return (
                    DocumentChange(
                        ChangeType.MODIFIED,
                        new_document,
                        remove_change.old_index,
                        add_change.new_index,
                    ),
                    updated_tree,
                    updated_map,
                )

            return None, updated_tree, updated_map

        # Process the sorted changes in the order that is expected by our
        # clients (removals, additions, and then modifications). We also need
        # to sort the individual changes to assure that old_index/new_index
        # keep incrementing.
        appliedChanges = []

        key = functools.cmp_to_key(self._comparator)

        # Deletes are sorted based on the order of the existing document.
        delete_changes = sorted(delete_changes)
        for name in delete_changes:
            change, updated_tree, updated_map = delete_doc(
                name, updated_tree, updated_map
            )
            appliedChanges.append(change)

        add_changes = sorted(add_changes, key=key)
        _LOGGER.debug("walk over add_changes")
        for snapshot in add_changes:
            _LOGGER.debug("in add_changes")
            change, updated_tree, updated_map = add_doc(
                snapshot, updated_tree, updated_map
            )
            appliedChanges.append(change)

        update_changes = sorted(update_changes, key=key)
        for snapshot in update_changes:
            change, updated_tree, updated_map = modify_doc(
                snapshot, updated_tree, updated_map
            )
            if change is not None:
                appliedChanges.append(change)

        assert len(updated_tree) == len(updated_map), (
            "The update document "
            + "tree and document map should have the same number of entries."
        )
        return (updated_tree, updated_map, appliedChanges)

    def _affects_target(self, target_ids, current_id):
        """Return True when ``current_id`` is covered by ``target_ids``
        (``None`` means "all targets")."""
        if target_ids is None:
            return True

        return current_id in target_ids

    def _current_size(self):
        """
        Returns the current count of all documents, including the changes from
        the current changeMap.
        """
        deletes, adds, _ = Watch._extract_changes(self.doc_map, self.change_map, None)
        return len(self.doc_map) + len(adds) - len(deletes)

    def _reset_docs(self):
        """
        Helper to clear the docs on RESET or filter mismatch.
        """
        _LOGGER.debug("resetting documents")
        self.change_map.clear()
        self.resume_token = None

        # Mark each document as deleted. If documents are not deleted
        # they will be sent again by the server.
        for snapshot in self.doc_tree.keys():
            name = snapshot.reference._document_path
            self.change_map[name] = ChangeType.REMOVED

        self.current = False
|
Loading…
Add table
Add a link
Reference in a new issue