Updated DB_Helper by adding firebase methods.

Batuhan Berk Başoğlu 2020-10-05 16:53:40 -04:00
parent 485cc3bbba
commit c82121d036
1810 changed files with 537281 additions and 1 deletion


@@ -0,0 +1,38 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable API package."""
from gcloud.bigtable.client import Client
_ERR_MSG = """\
gRPC is required for using the Cloud Bigtable API, but
importing the gRPC library (grpcio in PyPI) has failed.
As of June 2016, grpcio is only supported in Python 2.7,
which unfortunately means the Cloud Bigtable API isn't
available if you're using Python 3 or Python < 2.7.
If you're using Python 2.7 and importing / installing
grpcio has failed, this likely means you have a non-standard version
of Python installed. Check http://grpc.io if you're
having trouble installing the grpcio package.
"""
try:
    import grpc.beta.implementations
except ImportError as exc:  # pragma: NO COVER
    raise ImportError(_ERR_MSG, exc)
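
For context, a minimal usage sketch of this package follows. It is not part of this commit; the constructor arguments shown (project, admin) are assumptions based on the gcloud-python Client of this era.

# Importing Client succeeds only if grpcio imported above without error;
# otherwise the guard raises ImportError carrying the message in _ERR_MSG.
from gcloud.bigtable.client import Client

# 'my-project' is a placeholder project id; admin=True is assumed to enable
# the cluster/table admin APIs in addition to the data API.
client = Client(project='my-project', admin=True)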


@@ -0,0 +1,15 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated protobuf modules for Google Cloud Bigtable API."""


@@ -0,0 +1,93 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.cluster.v1;
import "google/api/annotations.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/timestamp.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterDataProto";
option java_package = "com.google.bigtable.admin.cluster.v1";
// A physical location in which a particular project can allocate Cloud BigTable
// resources.
message Zone {
// Possible states of a zone.
enum Status {
// The state of the zone is unknown or unspecified.
UNKNOWN = 0;
// The zone is in a good state.
OK = 1;
// The zone is down for planned maintenance.
PLANNED_MAINTENANCE = 2;
// The zone is down for emergency or unplanned maintenance.
EMERGENCY_MAINENANCE = 3;
}
// A permanent unique identifier for the zone.
// Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*
string name = 1;
// The name of this zone as it appears in UIs.
string display_name = 2;
// The current state of this zone.
Status status = 3;
}
// An isolated set of Cloud BigTable resources on which tables can be hosted.
message Cluster {
// A permanent unique identifier for the cluster. For technical reasons, the
// zone in which the cluster resides is included here.
// Values are of the form
// projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*
string name = 1;
// The operation currently running on the cluster, if any.
// This cannot be set directly, only through CreateCluster, UpdateCluster,
// or UndeleteCluster. Calls to these methods will be rejected if
// "current_operation" is already set.
google.longrunning.Operation current_operation = 3;
// The descriptive name for this cluster as it appears in UIs.
// Must be unique per zone.
string display_name = 4;
// The number of serve nodes allocated to this cluster.
int32 serve_nodes = 5;
// What storage type to use for tables in this cluster. Only configurable at
// cluster creation time. If unspecified, STORAGE_SSD will be used.
StorageType default_storage_type = 8;
}
enum StorageType {
// The storage type used is unspecified.
STORAGE_UNSPECIFIED = 0;
// Data will be stored in SSD, providing low and consistent latencies.
STORAGE_SSD = 1;
// Data will be stored in HDD, providing high and less predictable
// latencies.
STORAGE_HDD = 2;
}
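
As an illustration of the messages above, a hedged sketch of building a Cluster with the generated Python classes. It is not part of this commit, and the module path gcloud.bigtable._generated.bigtable_cluster_data_pb2 is an assumption based on the usual protoc output location for this package.

from gcloud.bigtable._generated import bigtable_cluster_data_pb2

# A 3-node SSD cluster definition. Only display_name, serve_nodes, and
# default_storage_type are set; "name" and "current_operation" are managed
# by the service, so they are left untouched.
cluster = bigtable_cluster_data_pb2.Cluster(
    display_name='prod-cluster',
    serve_nodes=3,
    default_storage_type=bigtable_cluster_data_pb2.STORAGE_SSD,
)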


@@ -0,0 +1,129 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.cluster.v1;
import "google/api/annotations.proto";
import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/empty.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterServicesProto";
option java_package = "com.google.bigtable.admin.cluster.v1";
// Service for managing zonal Cloud Bigtable resources.
service BigtableClusterService {
// Lists the supported zones for the given project.
rpc ListZones(ListZonesRequest) returns (ListZonesResponse) {
option (google.api.http) = { get: "/v1/{name=projects/*}/zones" };
}
// Gets information about a particular cluster.
rpc GetCluster(GetClusterRequest) returns (Cluster) {
option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" };
}
// Lists all clusters in the given project, along with any zones for which
// cluster information could not be retrieved.
rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" };
}
// Creates a cluster and begins preparing it to begin serving. The returned
// cluster embeds as its "current_operation" a long-running operation which
// can be used to track the progress of turning up the new cluster.
// Immediately upon completion of this request:
// * The cluster will be readable via the API, with all requested attributes
// but no allocated resources.
// Until completion of the embedded operation:
// * Cancelling the operation will render the cluster immediately unreadable
// via the API.
// * All other attempts to modify or delete the cluster will be rejected.
// Upon completion of the embedded operation:
// * Billing for all successfully-allocated resources will begin (some types
// may have lower than the requested levels).
// * New tables can be created in the cluster.
// * The cluster's allocated resource levels will be readable via the API.
// The embedded operation's "metadata" field type is
// [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
rpc CreateCluster(CreateClusterRequest) returns (Cluster) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" };
}
// Updates a cluster, and begins allocating or releasing resources as
// requested. The returned cluster embeds as its "current_operation" a
// long-running operation which can be used to track the progress of updating
// the cluster.
// Immediately upon completion of this request:
// * For resource types where a decrease in the cluster's allocation has been
// requested, billing will be based on the newly-requested level.
// Until completion of the embedded operation:
// * Cancelling the operation will set its metadata's "cancelled_at_time",
// and begin restoring resources to their pre-request values. The operation
// is guaranteed to succeed at undoing all resource changes, after which
// point it will terminate with a CANCELLED status.
// * All other attempts to modify or delete the cluster will be rejected.
// * Reading the cluster via the API will continue to give the pre-request
// resource levels.
// Upon completion of the embedded operation:
// * Billing will begin for all successfully-allocated resources (some types
// may have lower than the requested levels).
// * All newly-reserved resources will be available for serving the cluster's
// tables.
// * The cluster's new resource levels will be readable via the API.
// The embedded operation's "metadata" field type is
// [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
rpc UpdateCluster(Cluster) returns (Cluster) {
option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" };
}
// Marks a cluster and all of its tables for permanent deletion in 7 days.
// Immediately upon completion of the request:
// * Billing will cease for all of the cluster's reserved resources.
// * The cluster's "delete_time" field will be set 7 days in the future.
// Soon afterward:
// * All tables within the cluster will become unavailable.
// Prior to the cluster's "delete_time":
// * The cluster can be recovered with a call to UndeleteCluster.
// * All other attempts to modify or delete the cluster will be rejected.
// At the cluster's "delete_time":
// * The cluster and *all of its tables* will immediately and irrevocably
// disappear from the API, and their data will be permanently deleted.
rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" };
}
// Cancels the scheduled deletion of a cluster and begins preparing it to
// resume serving. The returned operation will also be embedded as the
// cluster's "current_operation".
// Immediately upon completion of this request:
// * The cluster's "delete_time" field will be unset, protecting it from
// automatic deletion.
// Until completion of the returned operation:
// * The operation cannot be cancelled.
// Upon completion of the returned operation:
// * Billing for the cluster's resources will resume.
// * All tables within the cluster will be available.
// The embedded operation's "metadata" field type is
// [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "null" };
}
}


@@ -0,0 +1,134 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.cluster.v1;
import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
import "google/protobuf/timestamp.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterServiceMessagesProto";
option java_package = "com.google.bigtable.admin.cluster.v1";
// Request message for BigtableClusterService.ListZones.
message ListZonesRequest {
// The unique name of the project for which a list of supported zones is
// requested.
// Values are of the form projects/<project>
string name = 1;
}
// Response message for BigtableClusterService.ListZones.
message ListZonesResponse {
// The list of requested zones.
repeated Zone zones = 1;
}
// Request message for BigtableClusterService.GetCluster.
message GetClusterRequest {
// The unique name of the requested cluster.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
string name = 1;
}
// Request message for BigtableClusterService.ListClusters.
message ListClustersRequest {
// The unique name of the project for which a list of clusters is requested.
// Values are of the form projects/<project>
string name = 1;
}
// Response message for BigtableClusterService.ListClusters.
message ListClustersResponse {
// The list of requested Clusters.
repeated Cluster clusters = 1;
// The zones for which clusters could not be retrieved.
repeated Zone failed_zones = 2;
}
// Request message for BigtableClusterService.CreateCluster.
message CreateClusterRequest {
// The unique name of the zone in which to create the cluster.
// Values are of the form projects/<project>/zones/<zone>
string name = 1;
// The id to be used when referring to the new cluster within its zone,
// e.g. just the "test-cluster" section of the full name
// "projects/<project>/zones/<zone>/clusters/test-cluster".
string cluster_id = 2;
// The cluster to create.
// The "name", "delete_time", and "current_operation" fields must be left
// blank.
Cluster cluster = 3;
}
// Metadata type for the operation returned by
// BigtableClusterService.CreateCluster.
message CreateClusterMetadata {
// The request which prompted the creation of this operation.
CreateClusterRequest original_request = 1;
// The time at which original_request was received.
google.protobuf.Timestamp request_time = 2;
// The time at which this operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 3;
}
// Metadata type for the operation returned by
// BigtableClusterService.UpdateCluster.
message UpdateClusterMetadata {
// The request which prompted the creation of this operation.
Cluster original_request = 1;
// The time at which original_request was received.
google.protobuf.Timestamp request_time = 2;
// The time at which this operation was cancelled. If set, this operation is
// in the process of undoing itself (which is guaranteed to succeed) and
// cannot be cancelled again.
google.protobuf.Timestamp cancel_time = 3;
// The time at which this operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 4;
}
// Request message for BigtableClusterService.DeleteCluster.
message DeleteClusterRequest {
// The unique name of the cluster to be deleted.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
string name = 1;
}
// Request message for BigtableClusterService.UndeleteCluster.
message UndeleteClusterRequest {
// The unique name of the cluster to be un-deleted.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
string name = 1;
}
// Metadata type for the operation returned by
// BigtableClusterService.UndeleteCluster.
message UndeleteClusterMetadata {
// The time at which the original request was received.
google.protobuf.Timestamp request_time = 1;
// The time at which this operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 2;
}
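
To tie these request messages to the cluster service above, a hedged sketch of assembling a CreateClusterRequest in Python follows; the generated module names are assumptions, and the project and zone values are placeholders.

from gcloud.bigtable._generated import (
    bigtable_cluster_data_pb2, bigtable_cluster_service_messages_pb2)

# "name" identifies the zone; "cluster_id" becomes the last path segment of
# the new cluster's name, as described in CreateClusterRequest above.
request = bigtable_cluster_service_messages_pb2.CreateClusterRequest(
    name='projects/my-project/zones/us-central1-b',
    cluster_id='test-cluster',
    cluster=bigtable_cluster_data_pb2.Cluster(
        display_name='Test cluster',
        serve_nodes=3,
    ),
)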


@@ -0,0 +1,515 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v1;
option java_multiple_files = true;
option java_outer_classname = "BigtableDataProto";
option java_package = "com.google.bigtable.v1";
// Specifies the complete (requested) contents of a single row of a table.
// Rows which exceed 256MiB in size cannot be read in full.
message Row {
// The unique key which identifies this row within its table. This is the same
// key that's used to identify the row in, for example, a MutateRowRequest.
// May contain any non-empty byte string up to 4KiB in length.
bytes key = 1;
// May be empty, but only if the entire row is empty.
// The mutual ordering of column families is not specified.
repeated Family families = 2;
}
// Specifies (some of) the contents of a single row/column family of a table.
message Family {
// The unique key which identifies this family within its row. This is the
// same key that's used to identify the family in, for example, a RowFilter
// which sets its "family_name_regex_filter" field.
// Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may
// produce cells in a sentinel family with an empty name.
// Must be no greater than 64 characters in length.
string name = 1;
// Must not be empty. Sorted in order of increasing "qualifier".
repeated Column columns = 2;
}
// Specifies (some of) the contents of a single row/column of a table.
message Column {
// The unique key which identifies this column within its family. This is the
// same key that's used to identify the column in, for example, a RowFilter
// which sets its "column_qualifier_regex_filter" field.
// May contain any byte string, including the empty string, up to 16kiB in
// length.
bytes qualifier = 1;
// Must not be empty. Sorted in order of decreasing "timestamp_micros".
repeated Cell cells = 2;
}
// Specifies (some of) the contents of a single row/column/timestamp of a table.
message Cell {
// The cell's stored timestamp, which also uniquely identifies it within
// its column.
// Values are always expressed in microseconds, but individual tables may set
// a coarser "granularity" to further restrict the allowed values. For
// example, a table which specifies millisecond granularity will only allow
// values of "timestamp_micros" which are multiples of 1000.
int64 timestamp_micros = 1;
// The value stored in the cell.
// May contain any byte string, including the empty string, up to 100MiB in
// length.
bytes value = 2;
// Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
repeated string labels = 3;
}
// Specifies a contiguous range of rows.
message RowRange {
// Inclusive lower bound. If left empty, interpreted as the empty string.
bytes start_key = 2;
// Exclusive upper bound. If left empty, interpreted as infinity.
bytes end_key = 3;
}
// Specifies a non-contiguous set of rows.
message RowSet {
// Single rows included in the set.
repeated bytes row_keys = 1;
// Contiguous row ranges included in the set.
repeated RowRange row_ranges = 2;
}
// Specifies a contiguous range of columns within a single column family.
// The range spans from <column_family>:<start_qualifier> to
// <column_family>:<end_qualifier>, where both bounds can be either inclusive or
// exclusive.
message ColumnRange {
// The name of the column family within which this range falls.
string family_name = 1;
// The column qualifier at which to start the range (within 'column_family').
// If neither field is set, interpreted as the empty string, inclusive.
oneof start_qualifier {
// Used when giving an inclusive lower bound for the range.
bytes start_qualifier_inclusive = 2;
// Used when giving an exclusive lower bound for the range.
bytes start_qualifier_exclusive = 3;
}
// The column qualifier at which to end the range (within 'column_family').
// If neither field is set, interpreted as the infinite string, exclusive.
oneof end_qualifier {
// Used when giving an inclusive upper bound for the range.
bytes end_qualifier_inclusive = 4;
// Used when giving an exclusive upper bound for the range.
bytes end_qualifier_exclusive = 5;
}
}
// Specifies a contiguous range of microsecond timestamps.
message TimestampRange {
// Inclusive lower bound. If left empty, interpreted as 0.
int64 start_timestamp_micros = 1;
// Exclusive upper bound. If left empty, interpreted as infinity.
int64 end_timestamp_micros = 2;
}
// Specifies a contiguous range of raw byte values.
message ValueRange {
// The value at which to start the range.
// If neither field is set, interpreted as the empty string, inclusive.
oneof start_value {
// Used when giving an inclusive lower bound for the range.
bytes start_value_inclusive = 1;
// Used when giving an exclusive lower bound for the range.
bytes start_value_exclusive = 2;
}
// The value at which to end the range.
// If neither field is set, interpreted as the infinite string, exclusive.
oneof end_value {
// Used when giving an inclusive upper bound for the range.
bytes end_value_inclusive = 3;
// Used when giving an exclusive upper bound for the range.
bytes end_value_exclusive = 4;
}
}
// Takes a row as input and produces an alternate view of the row based on
// specified rules. For example, a RowFilter might trim down a row to include
// just the cells from columns matching a given regular expression, or might
// return all the cells of a row but not their values. More complicated filters
// can be composed out of these components to express requests such as, "within
// every column of a particular family, give just the two most recent cells
// which are older than timestamp X."
//
// There are two broad categories of RowFilters (true filters and transformers),
// as well as two ways to compose simple filters into more complex ones
// (chains and interleaves). They work as follows:
//
// * True filters alter the input row by excluding some of its cells wholesale
// from the output row. An example of a true filter is the "value_regex_filter",
// which excludes cells whose values don't match the specified pattern. All
// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
// important point to keep in mind is that RE2(.) is equivalent by default to
// RE2([^\n]), meaning that it does not match newlines. When attempting to match
// an arbitrary byte, you should therefore use the escape sequence '\C', which
// may need to be further escaped as '\\C' in your client language.
//
// * Transformers alter the input row by changing the values of some of its
// cells in the output, without excluding them completely. Currently, the only
// supported transformer is the "strip_value_transformer", which replaces every
// cell's value with the empty string.
//
// * Chains and interleaves are described in more detail in the
// RowFilter.Chain and RowFilter.Interleave documentation.
//
// The total serialized size of a RowFilter message must not
// exceed 4096 bytes, and RowFilters may not be nested within each other
// (in Chains or Interleaves) to a depth of more than 20.
message RowFilter {
// A RowFilter which sends rows through several RowFilters in sequence.
message Chain {
// The elements of "filters" are chained together to process the input row:
// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
// The full chain is executed atomically.
repeated RowFilter filters = 1;
}
// A RowFilter which sends each row to each of several component
// RowFilters and interleaves the results.
message Interleave {
// The elements of "filters" all process a copy of the input row, and the
// results are pooled, sorted, and combined into a single output row.
// If multiple cells are produced with the same column and timestamp,
// they will all appear in the output row in an unspecified mutual order.
// Consider the following example, with three filters:
//
// input row
// |
// -----------------------------------------------------
// | | |
// f(0) f(1) f(2)
// | | |
// 1: foo,bar,10,x foo,bar,10,z far,bar,7,a
// 2: foo,blah,11,z far,blah,5,x far,blah,5,x
// | | |
// -----------------------------------------------------
// |
// 1: foo,bar,10,z // could have switched with #2
// 2: foo,bar,10,x // could have switched with #1
// 3: foo,blah,11,z
// 4: far,bar,7,a
// 5: far,blah,5,x // identical to #6
// 6: far,blah,5,x // identical to #5
// All interleaved filters are executed atomically.
repeated RowFilter filters = 1;
}
// A RowFilter which evaluates one of two possible RowFilters, depending on
// whether or not a predicate RowFilter outputs any cells from the input row.
//
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
// true and false filters, which may lead to inconsistent or unexpected
// results. Additionally, Condition filters have poor performance, especially
// when filters are set for the false condition.
message Condition {
// If "predicate_filter" outputs any cells, then "true_filter" will be
// evaluated on the input row. Otherwise, "false_filter" will be evaluated.
RowFilter predicate_filter = 1;
// The filter to apply to the input row if "predicate_filter" returns any
// results. If not provided, no results will be returned in the true case.
RowFilter true_filter = 2;
// The filter to apply to the input row if "predicate_filter" does not
// return any results. If not provided, no results will be returned in the
// false case.
RowFilter false_filter = 3;
}
// Which of the possible RowFilter types to apply. If none are set, this
// RowFilter returns all cells in the input row.
oneof filter {
// Applies several RowFilters to the data in sequence, progressively
// narrowing the results.
Chain chain = 1;
// Applies several RowFilters to the data in parallel and combines the
// results.
Interleave interleave = 2;
// Applies one of two possible RowFilters to the data based on the output of
// a predicate RowFilter.
Condition condition = 3;
// ADVANCED USE ONLY.
// Hook for introspection into the RowFilter. Outputs all cells directly to
// the output of the read rather than to any parent filter. Consider the
// following example:
//
// Chain(
// FamilyRegex("A"),
// Interleave(
// All(),
// Chain(Label("foo"), Sink())
// ),
// QualifierRegex("B")
// )
//
// A,A,1,w
// A,B,2,x
// B,B,4,z
// |
// FamilyRegex("A")
// |
// A,A,1,w
// A,B,2,x
// |
// +------------+-------------+
// | |
// All() Label(foo)
// | |
// A,A,1,w A,A,1,w,labels:[foo]
// A,B,2,x A,B,2,x,labels:[foo]
// | |
// | Sink() --------------+
// | | |
// +------------+ x------+ A,A,1,w,labels:[foo]
// | A,B,2,x,labels:[foo]
// A,A,1,w |
// A,B,2,x |
// | |
// QualifierRegex("B") |
// | |
// A,B,2,x |
// | |
// +--------------------------------+
// |
// A,A,1,w,labels:[foo]
// A,B,2,x,labels:[foo] // could be switched
// A,B,2,x // could be switched
//
// Despite being excluded by the qualifier filter, a copy of every cell
// that reaches the sink is present in the final result.
//
// As with an [Interleave][google.bigtable.v1.RowFilter.Interleave],
// duplicate cells are possible, and appear in an unspecified mutual order.
// In this case we have a duplicate with column "A:B" and timestamp 2,
// because one copy passed through the all filter while the other was
// passed through the label and sink. Note that one copy has label "foo",
// while the other does not.
//
// Cannot be used within the `predicate_filter`, `true_filter`, or
// `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition].
bool sink = 16;
// Matches all cells, regardless of input. Functionally equivalent to
// leaving `filter` unset, but included for completeness.
bool pass_all_filter = 17;
// Does not match any cells, regardless of input. Useful for temporarily
// disabling just part of a filter.
bool block_all_filter = 18;
// Matches only cells from rows whose keys satisfy the given RE2 regex. In
// other words, passes through the entire row when the key matches, and
// otherwise produces an empty row.
// Note that, since row keys can contain arbitrary bytes, the '\C' escape
// sequence must be used if a true wildcard is desired. The '.' character
// will not match the new line character '\n', which may be present in a
// binary key.
bytes row_key_regex_filter = 4;
// Matches all cells from a row with probability p, and matches no cells
// from the row with probability 1-p.
double row_sample_filter = 14;
// Matches only cells from columns whose families satisfy the given RE2
// regex. For technical reasons, the regex must not contain the ':'
// character, even if it is not being used as a literal.
// Note that, since column families cannot contain the new line character
// '\n', it is sufficient to use '.' as a full wildcard when matching
// column family names.
string family_name_regex_filter = 5;
// Matches only cells from columns whose qualifiers satisfy the given RE2
// regex.
// Note that, since column qualifiers can contain arbitrary bytes, the '\C'
// escape sequence must be used if a true wildcard is desired. The '.'
// character will not match the new line character '\n', which may be
// present in a binary qualifier.
bytes column_qualifier_regex_filter = 6;
// Matches only cells from columns within the given range.
ColumnRange column_range_filter = 7;
// Matches only cells with timestamps within the given range.
TimestampRange timestamp_range_filter = 8;
// Matches only cells with values that satisfy the given regular expression.
// Note that, since cell values can contain arbitrary bytes, the '\C' escape
// sequence must be used if a true wildcard is desired. The '.' character
// will not match the new line character '\n', which may be present in a
// binary value.
bytes value_regex_filter = 9;
// Matches only cells with values that fall within the given range.
ValueRange value_range_filter = 15;
// Skips the first N cells of each row, matching all subsequent cells.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_offset_filter = 10;
// Matches only the first N cells of each row.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_limit_filter = 11;
// Matches only the most recent N cells within each column. For example,
// if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
// skip all earlier cells in "foo:bar", and then begin matching again in
// column "foo:bar2".
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_column_limit_filter = 12;
// Replaces each cell's value with the empty string.
bool strip_value_transformer = 13;
// Applies the given label to all cells in the output row. This allows
// the client to determine which results were produced from which part of
// the filter.
//
// Values must be at most 15 characters in length, and match the RE2
// pattern [a-z0-9\\-]+
//
// Due to a technical limitation, it is not currently possible to apply
// multiple labels to a cell. As a result, a Chain may have no more than
// one sub-filter which contains an apply_label_transformer. It is okay for
// an Interleave to contain multiple apply_label_transformers, as they will
// be applied to separate copies of the input. This may be relaxed in the
// future.
string apply_label_transformer = 19;
}
}
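
Since RowFilter composition is easiest to see in code, here is a hedged sketch of a two-element Chain built with the generated classes; it is not part of this commit, and the module path is an assumption.

from gcloud.bigtable._generated import bigtable_data_pb2

# Keep only cells from family "cf1", then keep the two most recent cells per
# column; the chain applies its filters in order, as documented above.
row_filter = bigtable_data_pb2.RowFilter(
    chain=bigtable_data_pb2.RowFilter.Chain(filters=[
        bigtable_data_pb2.RowFilter(family_name_regex_filter='cf1'),
        bigtable_data_pb2.RowFilter(cells_per_column_limit_filter=2),
    ]),
)
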
// Specifies a particular change to be made to the contents of a row.
message Mutation {
// A Mutation which sets the value of the specified cell.
message SetCell {
// The name of the family into which new data should be written.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
// The qualifier of the column into which new data should be written.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The timestamp of the cell into which new data should be written.
// Use -1 for current Bigtable server time.
// Otherwise, the client should set this value itself, noting that the
// default value is a timestamp of zero if the field is left unspecified.
// Values must match the "granularity" of the table (e.g. micros, millis).
int64 timestamp_micros = 3;
// The value to be written into the specified cell.
bytes value = 4;
}
// A Mutation which deletes cells from the specified column, optionally
// restricting the deletions to a given timestamp range.
message DeleteFromColumn {
// The name of the family from which cells should be deleted.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
// The qualifier of the column from which cells should be deleted.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The range of timestamps within which cells should be deleted.
TimestampRange time_range = 3;
}
// A Mutation which deletes all cells from the specified column family.
message DeleteFromFamily {
// The name of the family from which cells should be deleted.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
}
// A Mutation which deletes all cells from the containing row.
message DeleteFromRow {
}
// Which of the possible Mutation types to apply.
oneof mutation {
// Set a cell's value.
SetCell set_cell = 1;
// Deletes cells from a column.
DeleteFromColumn delete_from_column = 2;
// Deletes cells from a column family.
DeleteFromFamily delete_from_family = 3;
// Deletes cells from the entire row.
DeleteFromRow delete_from_row = 4;
}
}
// Specifies an atomic read/modify/write operation on the latest value of the
// specified column.
message ReadModifyWriteRule {
// The name of the family to which the read/modify/write should be applied.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
// The qualifier of the column to which the read/modify/write should be
// applied.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The rule used to determine the column's new latest value from its current
// latest value.
oneof rule {
// Rule specifying that "append_value" be appended to the existing value.
// If the targeted cell is unset, it will be treated as containing the
// empty string.
bytes append_value = 3;
// Rule specifying that "increment_amount" be added to the existing value.
// If the targeted cell is unset, it will be treated as containing a zero.
// Otherwise, the targeted cell must contain an 8-byte value (interpreted
// as a 64-bit big-endian signed integer), or the entire request will fail.
int64 increment_amount = 4;
}
}
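
A short, hedged sketch of the read/modify/write rules defined above, using the generated classes (module path assumed; the family and qualifier names are placeholders):

from gcloud.bigtable._generated import bigtable_data_pb2

# Append b'!' to whatever is stored in cf1:greeting (empty string if unset).
append_rule = bigtable_data_pb2.ReadModifyWriteRule(
    family_name='cf1',
    column_qualifier=b'greeting',
    append_value=b'!',
)

# Add 1 to the 8-byte big-endian counter in cf1:views (zero if unset).
increment_rule = bigtable_data_pb2.ReadModifyWriteRule(
    family_name='cf1',
    column_qualifier=b'views',
    increment_amount=1,
)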


@@ -0,0 +1,73 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v1;
import "google/api/annotations.proto";
import "google/bigtable/v1/bigtable_data.proto";
import "google/bigtable/v1/bigtable_service_messages.proto";
import "google/protobuf/empty.proto";
option java_generic_services = true;
option java_multiple_files = true;
option java_outer_classname = "BigtableServicesProto";
option java_package = "com.google.bigtable.v1";
// Service for reading from and writing to existing Bigtables.
service BigtableService {
// Streams back the contents of all requested rows, optionally applying
// the same Reader filter to each. Depending on their size, rows may be
// broken up across multiple responses, but atomicity of each row will still
// be preserved.
rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" body: "*" };
}
// Returns a sample of row keys in the table. The returned row keys will
// delimit contiguous sections of the table of approximately equal size,
// which can be used to break up the data for distributed tasks like
// mapreduces.
rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
option (google.api.http) = { get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" };
}
// Mutates a row atomically. Cells already present in the row are left
// unchanged unless explicitly changed by 'mutation'.
rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" };
}
// Mutates multiple rows in a batch. Each individual row is mutated
// atomically as in MutateRow, but the entire batch is not executed
// atomically.
rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" };
}
// Mutates a row atomically based on the output of a predicate Reader filter.
rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" };
}
// Modifies a row atomically, reading the latest existing timestamp/value from
// the specified columns and writing a new value at
// max(existing timestamp, current server time) based on pre-defined
// read/modify/write rules. Returns the new contents of all modified cells.
rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" body: "*" };
}
}
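
As a rough sketch of how a client might reach this service through the beta gRPC surface guarded in gcloud/bigtable/__init__.py: the stub factory name follows the grpc beta codegen convention (beta_create_<Service>_stub) and, like the endpoint and table name below, is an assumption rather than something defined in this commit.

from grpc.beta import implementations
from gcloud.bigtable._generated import (
    bigtable_service_pb2, bigtable_service_messages_pb2)

# Placeholder endpoint; talking to the hosted service would require a secure,
# authenticated channel rather than an insecure one.
channel = implementations.insecure_channel('localhost', 8086)
stub = bigtable_service_pb2.beta_create_BigtableService_stub(channel)

table = ('projects/my-project/zones/us-central1-b/'
         'clusters/test-cluster/tables/my-table')
request = bigtable_service_messages_pb2.SampleRowKeysRequest(table_name=table)
for sample in stub.SampleRowKeys(request, 10.0):  # 10-second deadline
    print(sample.offset_bytes)  # each response also carries sample.row_key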


@@ -0,0 +1,214 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v1;
import "google/bigtable/v1/bigtable_data.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableServiceMessagesProto";
option java_package = "com.google.bigtable.v1";
// Request message for BigtableService.ReadRows.
message ReadRowsRequest {
// The unique name of the table from which to read.
string table_name = 1;
// If neither row_key nor row_range is set, reads from all rows.
oneof target {
// The key of a single row from which to read.
bytes row_key = 2;
// A range of rows from which to read.
RowRange row_range = 3;
// A set of rows from which to read. Entries need not be in order, and will
// be deduplicated before reading.
// The total serialized size of the set must not exceed 1MB.
RowSet row_set = 8;
}
// The filter to apply to the contents of the specified row(s). If unset,
// reads the entire table.
RowFilter filter = 5;
// By default, rows are read sequentially, producing results which are
// guaranteed to arrive in increasing row order. Setting
// "allow_row_interleaving" to true allows multiple rows to be interleaved in
// the response stream, which increases throughput but breaks this guarantee,
// and may force the client to use more memory to buffer partially-received
// rows. Cannot be set to true when specifying "num_rows_limit".
bool allow_row_interleaving = 6;
// The read will terminate after committing to N rows' worth of results. The
// default (zero) is to return all results.
// Note that "allow_row_interleaving" cannot be set to true when this is set.
int64 num_rows_limit = 7;
}
// Response message for BigtableService.ReadRows.
message ReadRowsResponse {
// Specifies a piece of a row's contents returned as part of the read
// response stream.
message Chunk {
oneof chunk {
// A subset of the data from a particular row. As long as no "reset_row"
// is received in between, multiple "row_contents" from the same row are
// from the same atomic view of that row, and will be received in the
// expected family/column/timestamp order.
Family row_contents = 1;
// Indicates that the client should drop all previous chunks for
// "row_key", as it will be re-read from the beginning.
bool reset_row = 2;
// Indicates that the client can safely process all previous chunks for
// "row_key", as its data has been fully read.
bool commit_row = 3;
}
}
// The key of the row for which we're receiving data.
// Results will be received in increasing row key order, unless
// "allow_row_interleaving" was specified in the request.
bytes row_key = 1;
// One or more chunks of the row specified by "row_key".
repeated Chunk chunks = 2;
}
// Request message for BigtableService.SampleRowKeys.
message SampleRowKeysRequest {
// The unique name of the table from which to sample row keys.
string table_name = 1;
}
// Response message for BigtableService.SampleRowKeys.
message SampleRowKeysResponse {
// Sorted streamed sequence of sample row keys in the table. The table might
// have contents before the first row key in the list and after the last one,
// but a key containing the empty string indicates "end of table" and will be
// the last response given, if present.
// Note that row keys in this list may not have ever been written to or read
// from, and users should therefore not make any assumptions about the row key
// structure that are specific to their use case.
bytes row_key = 1;
// Approximate total storage space used by all rows in the table which precede
// "row_key". Buffering the contents of all rows between two subsequent
// samples would require space roughly equal to the difference in their
// "offset_bytes" fields.
int64 offset_bytes = 2;
}
// Request message for BigtableService.MutateRow.
message MutateRowRequest {
// The unique name of the table to which the mutation should be applied.
string table_name = 1;
// The key of the row to which the mutation should be applied.
bytes row_key = 2;
// Changes to be atomically applied to the specified row. Entries are applied
// in order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry and at most 100000.
repeated Mutation mutations = 3;
}
// Request message for BigtableService.MutateRows.
message MutateRowsRequest {
message Entry {
// The key of the row to which the `mutations` should be applied.
bytes row_key = 1;
// Changes to be atomically applied to the specified row. Mutations are
// applied in order, meaning that earlier mutations can be masked by
// later ones.
// At least one mutation must be specified.
repeated Mutation mutations = 2;
}
// The unique name of the table to which the mutations should be applied.
string table_name = 1;
// The row keys/mutations to be applied in bulk.
// Each entry is applied as an atomic mutation, but the entries may be
// applied in arbitrary order (even between entries for the same row).
// At least one entry must be specified, and in total the entries may
// contain at most 100000 mutations.
repeated Entry entries = 2;
}
// Response message for BigtableService.MutateRows.
message MutateRowsResponse {
// The results for each Entry from the request, presented in the order
// in which the entries were originally given.
repeated google.rpc.Status statuses = 1;
}
// Request message for BigtableService.CheckAndMutateRow.
message CheckAndMutateRowRequest {
// The unique name of the table to which the conditional mutation should be
// applied.
string table_name = 1;
// The key of the row to which the conditional mutation should be applied.
bytes row_key = 2;
// The filter to be applied to the contents of the specified row. Depending
// on whether or not any results are yielded, either "true_mutations" or
// "false_mutations" will be executed. If unset, checks that the row contains
// any values at all.
RowFilter predicate_filter = 6;
// Changes to be atomically applied to the specified row if "predicate_filter"
// yields at least one cell when applied to "row_key". Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if "false_mutations" is empty, and at most
// 100000.
repeated Mutation true_mutations = 4;
// Changes to be atomically applied to the specified row if "predicate_filter"
// does not yield any cells when applied to "row_key". Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if "true_mutations" is empty, and at most
// 100000.
repeated Mutation false_mutations = 5;
}
// Response message for BigtableService.CheckAndMutateRow.
message CheckAndMutateRowResponse {
// Whether or not the request's "predicate_filter" yielded any results for
// the specified row.
bool predicate_matched = 1;
}
// Request message for BigtableService.ReadModifyWriteRow.
message ReadModifyWriteRowRequest {
// The unique name of the table to which the read/modify/write rules should be
// applied.
string table_name = 1;
// The key of the row to which the read/modify/write rules should be applied.
bytes row_key = 2;
// Rules specifying how the specified row's contents are to be transformed
// into writes. Entries are applied in order, meaning that earlier rules will
// affect the results of later ones.
repeated ReadModifyWriteRule rules = 3;
}
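
For a concrete picture of the data-path messages above, a hedged sketch of a single-row mutation request follows (module paths, table name, and cell contents are placeholders and assumptions):

from gcloud.bigtable._generated import (
    bigtable_data_pb2, bigtable_service_messages_pb2)

# Write b'hello' into cf1:greeting at the server's current time (-1).
mutation = bigtable_data_pb2.Mutation(
    set_cell=bigtable_data_pb2.Mutation.SetCell(
        family_name='cf1',
        column_qualifier=b'greeting',
        timestamp_micros=-1,
        value=b'hello',
    ),
)
request = bigtable_service_messages_pb2.MutateRowRequest(
    table_name=('projects/my-project/zones/us-central1-b/'
                'clusters/test-cluster/tables/my-table'),
    row_key=b'row-1',
    mutations=[mutation],
)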


@@ -0,0 +1,125 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.table.v1;
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableDataProto";
option java_package = "com.google.bigtable.admin.table.v1";
// A collection of user data indexed by row, column, and timestamp.
// Each table is served using the resources of its parent cluster.
message Table {
enum TimestampGranularity {
MILLIS = 0;
}
// A unique identifier of the form
// <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
string name = 1;
// If this Table is in the process of being created, the Operation used to
// track its progress. As long as this operation is present, the Table will
// not accept any Table Admin or Read/Write requests.
google.longrunning.Operation current_operation = 2;
// The column families configured for this table, mapped by column family id.
map<string, ColumnFamily> column_families = 3;
// The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
// this table. Timestamps not matching the granularity will be rejected.
// Cannot be changed once the table is created.
TimestampGranularity granularity = 4;
}
// A set of columns within a table which share a common configuration.
message ColumnFamily {
// A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+
// The last segment is the same as the "name" field in
// google.bigtable.v1.Family.
string name = 1;
// Garbage collection expression specified by the following grammar:
// GC = EXPR
// | "" ;
// EXPR = EXPR, "||", EXPR (* lowest precedence *)
// | EXPR, "&&", EXPR
// | "(", EXPR, ")" (* highest precedence *)
// | PROP ;
// PROP = "version() >", NUM32
// | "age() >", NUM64, [ UNIT ] ;
// NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *)
// NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *)
// UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *)
// GC expressions can be up to 500 characters in length
//
// The different types of PROP are defined as follows:
// version() - cell index, counting from most recent and starting at 1
// age() - age of the cell (current time minus cell timestamp)
//
// Example: "version() > 3 || (age() > 3d && version() > 1)"
// drop cells beyond the most recent three, and drop cells older than three
// days unless they're the most recent cell in the row/column
//
// Garbage collection executes opportunistically in the background, and so
// it's possible for reads to return a cell even if it matches the active GC
// expression for its family.
string gc_expression = 2;
// Garbage collection rule specified as a protobuf.
// Supersedes `gc_expression`.
// Must serialize to at most 500 bytes.
//
// NOTE: Garbage collection executes opportunistically in the background, and
// so it's possible for reads to return a cell even if it matches the active
// GC expression for its family.
GcRule gc_rule = 3;
}
// Rule for determining which cells to delete during garbage collection.
message GcRule {
// A GcRule which deletes cells matching all of the given rules.
message Intersection {
// Only delete cells which would be deleted by every element of `rules`.
repeated GcRule rules = 1;
}
// A GcRule which deletes cells matching any of the given rules.
message Union {
// Delete cells which would be deleted by any element of `rules`.
repeated GcRule rules = 1;
}
oneof rule {
// Delete all cells in a column except the most recent N.
int32 max_num_versions = 1;
// Delete cells in a column older than the given age.
// Values must be at least one millisecond, and will be truncated to
// microsecond granularity.
google.protobuf.Duration max_age = 2;
// Delete cells that would be deleted by every nested rule.
Intersection intersection = 3;
// Delete cells that would be deleted by any nested rule.
Union union = 4;
}
}
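
Because GcRule nesting is easier to read in code than in prose, here is a hedged sketch of an Intersection rule built with the generated classes (module path assumed; the 7-day window is an arbitrary example):

from google.protobuf import duration_pb2
from gcloud.bigtable._generated import bigtable_table_data_pb2

# Delete a cell only once it is BOTH beyond the three most recent versions
# AND older than seven days (intersection = every nested rule must match).
gc_rule = bigtable_table_data_pb2.GcRule(
    intersection=bigtable_table_data_pb2.GcRule.Intersection(rules=[
        bigtable_table_data_pb2.GcRule(max_num_versions=3),
        bigtable_table_data_pb2.GcRule(
            max_age=duration_pb2.Duration(seconds=7 * 24 * 60 * 60)),
    ]),
)
column_family = bigtable_table_data_pb2.ColumnFamily(gc_rule=gc_rule)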


@@ -0,0 +1,74 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.table.v1;
import "google/api/annotations.proto";
import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto";
import "google/protobuf/empty.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableServicesProto";
option java_package = "com.google.bigtable.admin.table.v1";
// Service for creating, configuring, and deleting Cloud Bigtable tables.
// Provides access to the table schemas only, not the data stored within the tables.
service BigtableTableService {
// Creates a new table, to be served from a specified cluster.
// The table can be created with a full set of initial column families,
// specified in the request.
rpc CreateTable(CreateTableRequest) returns (Table) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" };
}
// Lists the names of all tables served from a specified cluster.
rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" };
}
// Gets the schema of the specified table, including its column families.
rpc GetTable(GetTableRequest) returns (Table) {
option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
}
// Permanently deletes a specified table and all of its data.
rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
}
// Changes the name of a specified table.
// Cannot be used to move tables between clusters, zones, or projects.
rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" };
}
// Creates a new column family within a specified table.
rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" };
}
// Changes the configuration of a specified column family.
rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) {
option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" };
}
// Permanently deletes a specified column family and all of its data.
rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" };
}
}


@@ -0,0 +1,101 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.table.v1;
import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableServiceMessagesProto";
option java_package = "com.google.bigtable.admin.table.v1";
message CreateTableRequest {
// The unique name of the cluster in which to create the new table.
string name = 1;
// The name by which the new table should be referred to within the cluster,
// e.g. "foobar" rather than "<cluster_name>/tables/foobar".
string table_id = 2;
// The Table to create. The `name` field of the Table and all of its
// ColumnFamilies must be left blank, and will be populated in the response.
Table table = 3;
// The optional list of row keys that will be used to initially split the
// table into several tablets (Tablets are similar to HBase regions).
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
//
// Example:
// * Row keys := ["a", "apple", "custom", "customer_1", "customer_2",
// "other", "zz"]
// * initial_split_keys := ["apple", "customer_1", "customer_2", "other"]
// * Key assignment:
// - Tablet 1 [, apple) => {"a"}.
// - Tablet 2 [apple, customer_1) => {"apple", "custom"}.
// - Tablet 3 [customer_1, customer_2) => {"customer_1"}.
// - Tablet 4 [customer_2, other) => {"customer_2"}.
// - Tablet 5 [other, ) => {"other", "zz"}.
repeated string initial_split_keys = 4;
}
message ListTablesRequest {
// The unique name of the cluster for which tables should be listed.
string name = 1;
}
message ListTablesResponse {
// The tables present in the requested cluster.
// At present, only the names of the tables are populated.
repeated Table tables = 1;
}
message GetTableRequest {
// The unique name of the requested table.
string name = 1;
}
message DeleteTableRequest {
// The unique name of the table to be deleted.
string name = 1;
}
message RenameTableRequest {
// The current unique name of the table.
string name = 1;
// The new name by which the table should be referred to within its containing
// cluster, e.g. "foobar" rather than "<cluster_name>/tables/foobar".
string new_id = 2;
}
message CreateColumnFamilyRequest {
// The unique name of the table in which to create the new column family.
string name = 1;
// The name by which the new column family should be referred to within the
// table, e.g. "foobar" rather than "<table_name>/columnFamilies/foobar".
string column_family_id = 2;
// The column family to create. The `name` field must be left blank.
ColumnFamily column_family = 3;
}
message DeleteColumnFamilyRequest {
// The unique name of the column family to be deleted.
string name = 1;
}
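
For illustration, a short sketch of building the CreateTableRequest described above, mirroring the tablet-assignment example in its comments. The gcloud.bigtable._generated module paths are assumed from this commit's layout, and the resource names are placeholders.

from gcloud.bigtable._generated import (
    bigtable_table_data_pb2 as table_data_pb2,                   # assumed module path
    bigtable_table_service_messages_pb2 as table_messages_pb2)   # assumed module path

cluster = 'projects/my-project/zones/my-zone/clusters/my-cluster'  # placeholder
request = table_messages_pb2.CreateTableRequest(
    name=cluster,
    table_id='customers',
    table=table_data_pb2.Table(),  # name and column families left blank, per the comment above
    initial_split_keys=['apple', 'customer_1', 'customer_2', 'other'],
)
print(request)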


@@ -0,0 +1,144 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.longrunning;
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "OperationsProto";
option java_package = "com.google.longrunning";
// Manages long-running operations with an API service.
//
// When an API method normally takes a long time to complete, it can be designed
// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
// interface to receive the real response asynchronously by polling the
// operation resource, by using the `google.watcher.v1.Watcher` interface to watch
// the response, or by passing the operation resource to another API (such as the
// Google Cloud Pub/Sub API) to receive the response. Any API service that returns
// long-running operations should implement the `Operations` interface so that
// developers have a consistent client experience.
service Operations {
// Gets the latest state of a long-running operation. Clients may use this
// method to poll the operation result at intervals as recommended by the API
// service.
rpc GetOperation(GetOperationRequest) returns (Operation) {
option (google.api.http) = { get: "/v1/{name=operations/**}" };
}
// Lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
option (google.api.http) = { get: "/v1/{name=operations}" };
}
// Starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients may use
// [Operations.GetOperation] or other methods to check whether the
// cancellation succeeded or the operation completed despite cancellation.
rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
}
// Deletes a long-running operation. It indicates the client is no longer
// interested in the operation result. It does not cancel the operation.
rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=operations/**}" };
}
}
// This resource represents a long-running operation that is the result of a
// network API call.
message Operation {
// The name of the operation resource, which is only unique within the same
// service that originally returns it.
string name = 1;
// Some service-specific metadata associated with the operation. It typically
// contains progress information and common metadata such as create time.
// Some services may not provide such metadata. Any method that returns a
// long-running operation should document the metadata type, if any.
google.protobuf.Any metadata = 2;
// If the value is false, it means the operation is still in progress.
// If true, the operation is completed and the `result` is available.
bool done = 3;
oneof result {
// The error result of the operation in case of failure.
google.rpc.Status error = 4;
// The normal response of the operation in case of success. If the original
// method returns no data on success, such as `Delete`, the response will be
// `google.protobuf.Empty`. If the original method is standard
// `Get`/`Create`/`Update`, the response should be the resource. For other
// methods, the response should have the type `XxxResponse`, where `Xxx`
// is the original method name. For example, if the original method name
// is `TakeSnapshot()`, the inferred response type will be
// `TakeSnapshotResponse`.
google.protobuf.Any response = 5;
}
}
// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
message GetOperationRequest {
// The name of the operation resource.
string name = 1;
}
// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsRequest {
// The name of the operation collection.
string name = 4;
// The standard List filter.
string filter = 1;
// The standard List page size.
int32 page_size = 2;
// The standard List page token.
string page_token = 3;
}
// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsResponse {
// A list of operations that match the specified filter in the request.
repeated Operation operations = 1;
// The standard List next-page token.
string next_page_token = 2;
}
// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
message CancelOperationRequest {
// The name of the operation resource to be cancelled.
string name = 1;
}
// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
message DeleteOperationRequest {
// The name of the operation resource to be deleted.
string name = 1;
}
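
A hedged sketch of the polling pattern these comments describe: fetch the operation by name until `done` is set, then inspect the `result` oneof. The stub object is assumed to expose GetOperation(request, timeout) in the beta-gRPC calling convention used by the generated stubs elsewhere in this commit.

import time

from google.longrunning import operations_pb2


def wait_for_operation(stub, name, poll_seconds=2, timeout=10):
    # Poll GetOperation until the long-running operation completes.
    while True:
        operation = stub.GetOperation(
            operations_pb2.GetOperationRequest(name=name), timeout)
        if operation.done:
            return operation
        time.sleep(poll_seconds)

# Once done, exactly one half of the `result` oneof is populated:
#   operation.WhichOneof('result') == 'error'    -> operation.error is a google.rpc.Status
#   operation.WhichOneof('result') == 'response' -> operation.response is a packed Any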


@@ -0,0 +1,221 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto',
package='google.bigtable.admin.cluster.v1',
syntax='proto3',
serialized_pb=b'\n<google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto\x12 google.bigtable.admin.cluster.v1\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\x01\n\x04Zone\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12=\n\x06status\x18\x03 \x01(\x0e\x32-.google.bigtable.admin.cluster.v1.Zone.Status\"P\n\x06Status\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02OK\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x18\n\x14\x45MERGENCY_MAINENANCE\x10\x03\"\xc9\x01\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x11\x63urrent_operation\x18\x03 \x01(\x0b\x32\x1d.google.longrunning.Operation\x12\x14\n\x0c\x64isplay_name\x18\x04 \x01(\t\x12\x13\n\x0bserve_nodes\x18\x05 \x01(\x05\x12K\n\x14\x64\x65\x66\x61ult_storage_type\x18\x08 \x01(\x0e\x32-.google.bigtable.admin.cluster.v1.StorageType*H\n\x0bStorageType\x12\x17\n\x13STORAGE_UNSPECIFIED\x10\x00\x12\x0f\n\x0bSTORAGE_SSD\x10\x01\x12\x0f\n\x0bSTORAGE_HDD\x10\x02\x42\x42\n$com.google.bigtable.admin.cluster.v1B\x18\x42igtableClusterDataProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STORAGETYPE = _descriptor.EnumDescriptor(
name='StorageType',
full_name='google.bigtable.admin.cluster.v1.StorageType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STORAGE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STORAGE_SSD', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STORAGE_HDD', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=592,
serialized_end=664,
)
_sym_db.RegisterEnumDescriptor(_STORAGETYPE)
StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE)
STORAGE_UNSPECIFIED = 0
STORAGE_SSD = 1
STORAGE_HDD = 2
_ZONE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='google.bigtable.admin.cluster.v1.Zone.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PLANNED_MAINTENANCE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMERGENCY_MAINENANCE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=306,
serialized_end=386,
)
_sym_db.RegisterEnumDescriptor(_ZONE_STATUS)
_ZONE = _descriptor.Descriptor(
name='Zone',
full_name='google.bigtable.admin.cluster.v1.Zone',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.Zone.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.bigtable.admin.cluster.v1.Zone.display_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='google.bigtable.admin.cluster.v1.Zone.status', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ZONE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=199,
serialized_end=386,
)
_CLUSTER = _descriptor.Descriptor(
name='Cluster',
full_name='google.bigtable.admin.cluster.v1.Cluster',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.Cluster.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_operation', full_name='google.bigtable.admin.cluster.v1.Cluster.current_operation', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.bigtable.admin.cluster.v1.Cluster.display_name', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serve_nodes', full_name='google.bigtable.admin.cluster.v1.Cluster.serve_nodes', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_storage_type', full_name='google.bigtable.admin.cluster.v1.Cluster.default_storage_type', index=4,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=389,
serialized_end=590,
)
_ZONE.fields_by_name['status'].enum_type = _ZONE_STATUS
_ZONE_STATUS.containing_type = _ZONE
_CLUSTER.fields_by_name['current_operation'].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION
_CLUSTER.fields_by_name['default_storage_type'].enum_type = _STORAGETYPE
DESCRIPTOR.message_types_by_name['Zone'] = _ZONE
DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER
DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE
Zone = _reflection.GeneratedProtocolMessageType('Zone', (_message.Message,), dict(
DESCRIPTOR = _ZONE,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.Zone)
))
_sym_db.RegisterMessage(Zone)
Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict(
DESCRIPTOR = _CLUSTER,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.Cluster)
))
_sym_db.RegisterMessage(Cluster)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n$com.google.bigtable.admin.cluster.v1B\030BigtableClusterDataProtoP\001')
# @@protoc_insertion_point(module_scope)
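
A brief usage sketch for the classes this generated module registers, assuming it is importable as gcloud.bigtable._generated.bigtable_cluster_data_pb2 (the path used by the other generated modules in this commit); the resource names are placeholders.

from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2

# Enum values are exposed both on the EnumTypeWrapper and at module level.
assert data_pb2.STORAGE_SSD == data_pb2.StorageType.Value('STORAGE_SSD')

cluster = data_pb2.Cluster(
    name='projects/my-project/zones/my-zone/clusters/my-cluster',  # placeholder
    display_name='my cluster',
    serve_nodes=3,
    default_storage_type=data_pb2.STORAGE_SSD,
)
zone = data_pb2.Zone(name='my-zone', status=data_pb2.Zone.OK)

# Generated messages round-trip through their wire format.
assert data_pb2.Cluster.FromString(cluster.SerializeToString()) == cluster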


@@ -0,0 +1,538 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto',
package='google.bigtable.admin.cluster.v1',
syntax='proto3',
serialized_pb=b'\nHgoogle/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto\x12 google.bigtable.admin.cluster.v1\x1a<google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto\x1a\x1fgoogle/protobuf/timestamp.proto\" \n\x10ListZonesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"J\n\x11ListZonesResponse\x12\x35\n\x05zones\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.cluster.v1.Zone\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"#\n\x13ListClustersRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x91\x01\n\x14ListClustersResponse\x12;\n\x08\x63lusters\x18\x01 \x03(\x0b\x32).google.bigtable.admin.cluster.v1.Cluster\x12<\n\x0c\x66\x61iled_zones\x18\x02 \x03(\x0b\x32&.google.bigtable.admin.cluster.v1.Zone\"t\n\x14\x43reateClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12:\n\x07\x63luster\x18\x03 \x01(\x0b\x32).google.bigtable.admin.cluster.v1.Cluster\"\xcc\x01\n\x15\x43reateClusterMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.cluster.v1.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xf0\x01\n\x15UpdateClusterMetadata\x12\x43\n\x10original_request\x18\x01 \x01(\x0b\x32).google.bigtable.admin.cluster.v1.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63\x61ncel_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16UndeleteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"|\n\x17UndeleteClusterMetadata\x12\x30\n\x0crequest_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampBM\n$com.google.bigtable.admin.cluster.v1B#BigtableClusterServiceMessagesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LISTZONESREQUEST = _descriptor.Descriptor(
name='ListZonesRequest',
full_name='google.bigtable.admin.cluster.v1.ListZonesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.ListZonesRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=205,
serialized_end=237,
)
_LISTZONESRESPONSE = _descriptor.Descriptor(
name='ListZonesResponse',
full_name='google.bigtable.admin.cluster.v1.ListZonesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='zones', full_name='google.bigtable.admin.cluster.v1.ListZonesResponse.zones', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=313,
)
_GETCLUSTERREQUEST = _descriptor.Descriptor(
name='GetClusterRequest',
full_name='google.bigtable.admin.cluster.v1.GetClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.GetClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=315,
serialized_end=348,
)
_LISTCLUSTERSREQUEST = _descriptor.Descriptor(
name='ListClustersRequest',
full_name='google.bigtable.admin.cluster.v1.ListClustersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.ListClustersRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=350,
serialized_end=385,
)
_LISTCLUSTERSRESPONSE = _descriptor.Descriptor(
name='ListClustersResponse',
full_name='google.bigtable.admin.cluster.v1.ListClustersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='clusters', full_name='google.bigtable.admin.cluster.v1.ListClustersResponse.clusters', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='failed_zones', full_name='google.bigtable.admin.cluster.v1.ListClustersResponse.failed_zones', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=533,
)
_CREATECLUSTERREQUEST = _descriptor.Descriptor(
name='CreateClusterRequest',
full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest.cluster_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cluster', full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest.cluster', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=651,
)
_CREATECLUSTERMETADATA = _descriptor.Descriptor(
name='CreateClusterMetadata',
full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_request', full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata.original_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='request_time', full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata.request_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time', full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata.finish_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=654,
serialized_end=858,
)
_UPDATECLUSTERMETADATA = _descriptor.Descriptor(
name='UpdateClusterMetadata',
full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_request', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.original_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='request_time', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.request_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cancel_time', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.cancel_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.finish_time', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=861,
serialized_end=1101,
)
_DELETECLUSTERREQUEST = _descriptor.Descriptor(
name='DeleteClusterRequest',
full_name='google.bigtable.admin.cluster.v1.DeleteClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.DeleteClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1103,
serialized_end=1139,
)
_UNDELETECLUSTERREQUEST = _descriptor.Descriptor(
name='UndeleteClusterRequest',
full_name='google.bigtable.admin.cluster.v1.UndeleteClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.UndeleteClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1141,
serialized_end=1179,
)
_UNDELETECLUSTERMETADATA = _descriptor.Descriptor(
name='UndeleteClusterMetadata',
full_name='google.bigtable.admin.cluster.v1.UndeleteClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_time', full_name='google.bigtable.admin.cluster.v1.UndeleteClusterMetadata.request_time', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time', full_name='google.bigtable.admin.cluster.v1.UndeleteClusterMetadata.finish_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1181,
serialized_end=1305,
)
_LISTZONESRESPONSE.fields_by_name['zones'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._ZONE
_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._CLUSTER
_LISTCLUSTERSRESPONSE.fields_by_name['failed_zones'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._ZONE
_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._CLUSTER
_CREATECLUSTERMETADATA.fields_by_name['original_request'].message_type = _CREATECLUSTERREQUEST
_CREATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CREATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._CLUSTER
_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATECLUSTERMETADATA.fields_by_name['cancel_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UNDELETECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UNDELETECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name['ListZonesRequest'] = _LISTZONESREQUEST
DESCRIPTOR.message_types_by_name['ListZonesResponse'] = _LISTZONESRESPONSE
DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST
DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE
DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['CreateClusterMetadata'] = _CREATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UndeleteClusterRequest'] = _UNDELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UndeleteClusterMetadata'] = _UNDELETECLUSTERMETADATA
ListZonesRequest = _reflection.GeneratedProtocolMessageType('ListZonesRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTZONESREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListZonesRequest)
))
_sym_db.RegisterMessage(ListZonesRequest)
ListZonesResponse = _reflection.GeneratedProtocolMessageType('ListZonesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTZONESRESPONSE,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListZonesResponse)
))
_sym_db.RegisterMessage(ListZonesResponse)
GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.GetClusterRequest)
))
_sym_db.RegisterMessage(GetClusterRequest)
ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListClustersRequest)
))
_sym_db.RegisterMessage(ListClustersRequest)
ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSRESPONSE,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListClustersResponse)
))
_sym_db.RegisterMessage(ListClustersResponse)
CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATECLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.CreateClusterRequest)
))
_sym_db.RegisterMessage(CreateClusterRequest)
CreateClusterMetadata = _reflection.GeneratedProtocolMessageType('CreateClusterMetadata', (_message.Message,), dict(
DESCRIPTOR = _CREATECLUSTERMETADATA,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.CreateClusterMetadata)
))
_sym_db.RegisterMessage(CreateClusterMetadata)
UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict(
DESCRIPTOR = _UPDATECLUSTERMETADATA,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.UpdateClusterMetadata)
))
_sym_db.RegisterMessage(UpdateClusterMetadata)
DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETECLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.DeleteClusterRequest)
))
_sym_db.RegisterMessage(DeleteClusterRequest)
UndeleteClusterRequest = _reflection.GeneratedProtocolMessageType('UndeleteClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _UNDELETECLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.UndeleteClusterRequest)
))
_sym_db.RegisterMessage(UndeleteClusterRequest)
UndeleteClusterMetadata = _reflection.GeneratedProtocolMessageType('UndeleteClusterMetadata', (_message.Message,), dict(
DESCRIPTOR = _UNDELETECLUSTERMETADATA,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.UndeleteClusterMetadata)
))
_sym_db.RegisterMessage(UndeleteClusterMetadata)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n$com.google.bigtable.admin.cluster.v1B#BigtableClusterServiceMessagesProtoP\001')
# @@protoc_insertion_point(module_scope)
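
By way of illustration, a small sketch that builds one of the request messages registered above and round-trips it through its wire format; module paths follow the imports at the top of this file, and the resource names are placeholders.

from gcloud.bigtable._generated import (
    bigtable_cluster_data_pb2 as data_pb2,
    bigtable_cluster_service_messages_pb2 as messages_pb2)

request = messages_pb2.CreateClusterRequest(
    name='projects/my-project/zones/my-zone',  # placeholder parent resource
    cluster_id='my-cluster',
    cluster=data_pb2.Cluster(display_name='my cluster', serve_nodes=3),
)

# SerializeToString/FromString round-trip the request through its wire format.
wire = request.SerializeToString()
assert messages_pb2.CreateClusterRequest.FromString(wire) == request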


@@ -0,0 +1,187 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2
from gcloud.bigtable._generated import bigtable_cluster_service_messages_pb2 as google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__service__messages__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto',
package='google.bigtable.admin.cluster.v1',
syntax='proto3',
serialized_pb=b'\n?google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto\x12 google.bigtable.admin.cluster.v1\x1a\x1cgoogle/api/annotations.proto\x1a<google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto\x1aHgoogle/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto2\x8f\t\n\x16\x42igtableClusterService\x12\x99\x01\n\tListZones\x12\x32.google.bigtable.admin.cluster.v1.ListZonesRequest\x1a\x33.google.bigtable.admin.cluster.v1.ListZonesResponse\"#\x82\xd3\xe4\x93\x02\x1d\x12\x1b/v1/{name=projects/*}/zones\x12\x9e\x01\n\nGetCluster\x12\x33.google.bigtable.admin.cluster.v1.GetClusterRequest\x1a).google.bigtable.admin.cluster.v1.Cluster\"0\x82\xd3\xe4\x93\x02*\x12(/v1/{name=projects/*/zones/*/clusters/*}\x12\xb0\x01\n\x0cListClusters\x12\x35.google.bigtable.admin.cluster.v1.ListClustersRequest\x1a\x36.google.bigtable.admin.cluster.v1.ListClustersResponse\"1\x82\xd3\xe4\x93\x02+\x12)/v1/{name=projects/*}/aggregated/clusters\x12\xa5\x01\n\rCreateCluster\x12\x36.google.bigtable.admin.cluster.v1.CreateClusterRequest\x1a).google.bigtable.admin.cluster.v1.Cluster\"1\x82\xd3\xe4\x93\x02+\"&/v1/{name=projects/*/zones/*}/clusters:\x01*\x12\x9a\x01\n\rUpdateCluster\x12).google.bigtable.admin.cluster.v1.Cluster\x1a).google.bigtable.admin.cluster.v1.Cluster\"3\x82\xd3\xe4\x93\x02-\x1a(/v1/{name=projects/*/zones/*/clusters/*}:\x01*\x12\x91\x01\n\rDeleteCluster\x12\x36.google.bigtable.admin.cluster.v1.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"0\x82\xd3\xe4\x93\x02**(/v1/{name=projects/*/zones/*/clusters/*}\x12\xab\x01\n\x0fUndeleteCluster\x12\x38.google.bigtable.admin.cluster.v1.UndeleteClusterRequest\x1a\x1d.google.longrunning.Operation\"?\x82\xd3\xe4\x93\x02\x39\"1/v1/{name=projects/*/zones/*/clusters/*}:undelete:\x04nullBF\n$com.google.bigtable.admin.cluster.v1B\x1c\x42igtableClusterServicesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__service__messages__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n$com.google.bigtable.admin.cluster.v1B\034BigtableClusterServicesProtoP\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaBigtableClusterServiceServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ListZones(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def GetCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ListClusters(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CreateCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def UpdateCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def UndeleteCluster(self, request, context):
raise NotImplementedError()
class BetaBigtableClusterServiceStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ListZones(self, request, timeout):
raise NotImplementedError()
ListZones.future = None
@abc.abstractmethod
def GetCluster(self, request, timeout):
raise NotImplementedError()
GetCluster.future = None
@abc.abstractmethod
def ListClusters(self, request, timeout):
raise NotImplementedError()
ListClusters.future = None
@abc.abstractmethod
def CreateCluster(self, request, timeout):
raise NotImplementedError()
CreateCluster.future = None
@abc.abstractmethod
def UpdateCluster(self, request, timeout):
raise NotImplementedError()
UpdateCluster.future = None
@abc.abstractmethod
def DeleteCluster(self, request, timeout):
raise NotImplementedError()
DeleteCluster.future = None
@abc.abstractmethod
def UndeleteCluster(self, request, timeout):
raise NotImplementedError()
UndeleteCluster.future = None
def beta_create_BigtableClusterService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.protobuf.empty_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.longrunning.operations_pb2
request_deserializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
}
response_serializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
}
method_implementations = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): face_utilities.unary_unary_inline(servicer.ListZones),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): face_utilities.unary_unary_inline(servicer.UndeleteCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_BigtableClusterService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.protobuf.empty_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.longrunning.operations_pb2
request_serializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
}
response_deserializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
}
cardinalities = {
'CreateCluster': cardinality.Cardinality.UNARY_UNARY,
'DeleteCluster': cardinality.Cardinality.UNARY_UNARY,
'GetCluster': cardinality.Cardinality.UNARY_UNARY,
'ListClusters': cardinality.Cardinality.UNARY_UNARY,
'ListZones': cardinality.Cardinality.UNARY_UNARY,
'UndeleteCluster': cardinality.Cardinality.UNARY_UNARY,
'UpdateCluster': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.cluster.v1.BigtableClusterService', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
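
A short sketch of wiring the beta stub factory defined above to a channel and issuing a unary call. The host, port and project name are placeholders, and a real deployment would use an authenticated channel rather than an insecure one.

from grpc.beta import implementations

from gcloud.bigtable._generated import (
    bigtable_cluster_service_pb2 as service_pb2,            # this module; path assumed
    bigtable_cluster_service_messages_pb2 as messages_pb2)

channel = implementations.insecure_channel('localhost', 9090)  # placeholder endpoint
stub = service_pb2.beta_create_BigtableClusterService_stub(channel)

response = stub.ListZones(
    messages_pb2.ListZonesRequest(name='projects/my-project'),  # placeholder project
    10)  # 10-second timeout
for zone in response.zones:
    print(zone.name, zone.status)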

File diff suppressed because it is too large.


@@ -0,0 +1,678 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/v1/bigtable_service_messages.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/v1/bigtable_service_messages.proto',
package='google.bigtable.v1',
syntax='proto3',
serialized_pb=b'\n2google/bigtable/v1/bigtable_service_messages.proto\x12\x12google.bigtable.v1\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x17google/rpc/status.proto\"\x8b\x02\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x11\n\x07row_key\x18\x02 \x01(\x0cH\x00\x12\x31\n\trow_range\x18\x03 \x01(\x0b\x32\x1c.google.bigtable.v1.RowRangeH\x00\x12-\n\x07row_set\x18\x08 \x01(\x0b\x32\x1a.google.bigtable.v1.RowSetH\x00\x12-\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x1e\n\x16\x61llow_row_interleaving\x18\x06 \x01(\x08\x12\x16\n\x0enum_rows_limit\x18\x07 \x01(\x03\x42\x08\n\x06target\"\xd0\x01\n\x10ReadRowsResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12:\n\x06\x63hunks\x18\x02 \x03(\x0b\x32*.google.bigtable.v1.ReadRowsResponse.Chunk\x1ao\n\x05\x43hunk\x12\x32\n\x0crow_contents\x18\x01 \x01(\x0b\x32\x1a.google.bigtable.v1.FamilyH\x00\x12\x13\n\treset_row\x18\x02 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\x03 \x01(\x08H\x00\x42\x07\n\x05\x63hunk\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v1.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\":\n\x12MutateRowsResponse\x12$\n\x08statuses\x18\x01 \x03(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v1.ReadModifyWriteRuleB8\n\x16\x63om.google.bigtable.v1B\x1c\x42igtableServiceMessagesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_READROWSREQUEST = _descriptor.Descriptor(
name='ReadRowsRequest',
full_name='google.bigtable.v1.ReadRowsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.ReadRowsRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.ReadRowsRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_range', full_name='google.bigtable.v1.ReadRowsRequest.row_range', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_set', full_name='google.bigtable.v1.ReadRowsRequest.row_set', index=3,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filter', full_name='google.bigtable.v1.ReadRowsRequest.filter', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_row_interleaving', full_name='google.bigtable.v1.ReadRowsRequest.allow_row_interleaving', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_rows_limit', full_name='google.bigtable.v1.ReadRowsRequest.num_rows_limit', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='target', full_name='google.bigtable.v1.ReadRowsRequest.target',
index=0, containing_type=None, fields=[]),
],
serialized_start=140,
serialized_end=407,
)
_READROWSRESPONSE_CHUNK = _descriptor.Descriptor(
name='Chunk',
full_name='google.bigtable.v1.ReadRowsResponse.Chunk',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_contents', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.row_contents', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reset_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.reset_row', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='commit_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.commit_row', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='chunk', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.chunk',
index=0, containing_type=None, fields=[]),
],
serialized_start=507,
serialized_end=618,
)
_READROWSRESPONSE = _descriptor.Descriptor(
name='ReadRowsResponse',
full_name='google.bigtable.v1.ReadRowsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.ReadRowsResponse.row_key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chunks', full_name='google.bigtable.v1.ReadRowsResponse.chunks', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_READROWSRESPONSE_CHUNK, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=410,
serialized_end=618,
)
_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor(
name='SampleRowKeysRequest',
full_name='google.bigtable.v1.SampleRowKeysRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.SampleRowKeysRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=620,
serialized_end=662,
)
_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor(
name='SampleRowKeysResponse',
full_name='google.bigtable.v1.SampleRowKeysResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.SampleRowKeysResponse.row_key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='offset_bytes', full_name='google.bigtable.v1.SampleRowKeysResponse.offset_bytes', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=664,
serialized_end=726,
)
_MUTATEROWREQUEST = _descriptor.Descriptor(
name='MutateRowRequest',
full_name='google.bigtable.v1.MutateRowRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.MutateRowRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.MutateRowRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mutations', full_name='google.bigtable.v1.MutateRowRequest.mutations', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=728,
serialized_end=832,
)
_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor(
name='Entry',
full_name='google.bigtable.v1.MutateRowsRequest.Entry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.MutateRowsRequest.Entry.row_key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mutations', full_name='google.bigtable.v1.MutateRowsRequest.Entry.mutations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=938,
serialized_end=1011,
)
_MUTATEROWSREQUEST = _descriptor.Descriptor(
name='MutateRowsRequest',
full_name='google.bigtable.v1.MutateRowsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.MutateRowsRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entries', full_name='google.bigtable.v1.MutateRowsRequest.entries', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_MUTATEROWSREQUEST_ENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=835,
serialized_end=1011,
)
_MUTATEROWSRESPONSE = _descriptor.Descriptor(
name='MutateRowsResponse',
full_name='google.bigtable.v1.MutateRowsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='statuses', full_name='google.bigtable.v1.MutateRowsResponse.statuses', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1013,
serialized_end=1071,
)
_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor(
name='CheckAndMutateRowRequest',
full_name='google.bigtable.v1.CheckAndMutateRowRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.CheckAndMutateRowRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.CheckAndMutateRowRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predicate_filter', full_name='google.bigtable.v1.CheckAndMutateRowRequest.predicate_filter', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='true_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.true_mutations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='false_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.false_mutations', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1074,
serialized_end=1303,
)
_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor(
name='CheckAndMutateRowResponse',
full_name='google.bigtable.v1.CheckAndMutateRowResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='predicate_matched', full_name='google.bigtable.v1.CheckAndMutateRowResponse.predicate_matched', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1305,
serialized_end=1359,
)
_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor(
name='ReadModifyWriteRowRequest',
full_name='google.bigtable.v1.ReadModifyWriteRowRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.rules', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1361,
serialized_end=1481,
)
_READROWSREQUEST.fields_by_name['row_range'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWRANGE
_READROWSREQUEST.fields_by_name['row_set'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWSET
_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER
_READROWSREQUEST.oneofs_by_name['target'].fields.append(
_READROWSREQUEST.fields_by_name['row_key'])
_READROWSREQUEST.fields_by_name['row_key'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target']
_READROWSREQUEST.oneofs_by_name['target'].fields.append(
_READROWSREQUEST.fields_by_name['row_range'])
_READROWSREQUEST.fields_by_name['row_range'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target']
_READROWSREQUEST.oneofs_by_name['target'].fields.append(
_READROWSREQUEST.fields_by_name['row_set'])
_READROWSREQUEST.fields_by_name['row_set'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target']
_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._FAMILY
_READROWSRESPONSE_CHUNK.containing_type = _READROWSRESPONSE
_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append(
_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'])
_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk']
_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append(
_READROWSRESPONSE_CHUNK.fields_by_name['reset_row'])
_READROWSRESPONSE_CHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk']
_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append(
_READROWSRESPONSE_CHUNK.fields_by_name['commit_row'])
_READROWSRESPONSE_CHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk']
_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CHUNK
_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST
_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY
_MUTATEROWSRESPONSE.fields_by_name['statuses'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER
_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._READMODIFYWRITERULE
DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST
DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE
DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST
DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE
DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST
DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST
DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE
DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST
DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE
DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST
ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict(
DESCRIPTOR = _READROWSREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsRequest)
))
_sym_db.RegisterMessage(ReadRowsRequest)
ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict(
Chunk = _reflection.GeneratedProtocolMessageType('Chunk', (_message.Message,), dict(
DESCRIPTOR = _READROWSRESPONSE_CHUNK,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse.Chunk)
))
,
DESCRIPTOR = _READROWSRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse)
))
_sym_db.RegisterMessage(ReadRowsResponse)
_sym_db.RegisterMessage(ReadRowsResponse.Chunk)
SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict(
DESCRIPTOR = _SAMPLEROWKEYSREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysRequest)
))
_sym_db.RegisterMessage(SampleRowKeysRequest)
SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict(
DESCRIPTOR = _SAMPLEROWKEYSRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysResponse)
))
_sym_db.RegisterMessage(SampleRowKeysResponse)
MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATEROWREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowRequest)
))
_sym_db.RegisterMessage(MutateRowRequest)
MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict(
Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict(
DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest.Entry)
))
,
DESCRIPTOR = _MUTATEROWSREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest)
))
_sym_db.RegisterMessage(MutateRowsRequest)
_sym_db.RegisterMessage(MutateRowsRequest.Entry)
MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATEROWSRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsResponse)
))
_sym_db.RegisterMessage(MutateRowsResponse)
CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict(
DESCRIPTOR = _CHECKANDMUTATEROWREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowRequest)
))
_sym_db.RegisterMessage(CheckAndMutateRowRequest)
CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict(
DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowResponse)
))
_sym_db.RegisterMessage(CheckAndMutateRowResponse)
ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict(
DESCRIPTOR = _READMODIFYWRITEROWREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadModifyWriteRowRequest)
))
_sym_db.RegisterMessage(ReadModifyWriteRowRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\034BigtableServiceMessagesProtoP\001')
# @@protoc_insertion_point(module_scope)
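For orientation (an editorial sketch, not part of the generated module above): the classes registered here behave like ordinary protobuf messages, so a request can be built with keyword arguments, serialized to wire format, and parsed back. The table path and row key below are made-up example values.

from gcloud.bigtable._generated import bigtable_service_messages_pb2 as messages_pb2

# row_key, row_range and row_set belong to the "target" oneof, so at most one
# of them may be set on a single request.
request = messages_pb2.ReadRowsRequest(
    table_name='projects/p/zones/z/clusters/c/tables/t',  # hypothetical table path
    row_key=b'row-0001',
    num_rows_limit=10,
)
payload = request.SerializeToString()                      # wire-format bytes
parsed = messages_pb2.ReadRowsRequest.FromString(payload)  # round-trip back to a message
assert parsed.row_key == b'row-0001'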

View file

@@ -0,0 +1,167 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/v1/bigtable_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2
from gcloud.bigtable._generated import bigtable_service_messages_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/v1/bigtable_service.proto',
package='google.bigtable.v1',
syntax='proto3',
serialized_pb=b'\n)google/bigtable/v1/bigtable_service.proto\x12\x12google.bigtable.v1\x1a\x1cgoogle/api/annotations.proto\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x32google/bigtable/v1/bigtable_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\xdd\x08\n\x0f\x42igtableService\x12\xa5\x01\n\x08ReadRows\x12#.google.bigtable.v1.ReadRowsRequest\x1a$.google.bigtable.v1.ReadRowsResponse\"L\x82\xd3\xe4\x93\x02\x46\"A/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read:\x01*0\x01\x12\xb7\x01\n\rSampleRowKeys\x12(.google.bigtable.v1.SampleRowKeysRequest\x1a).google.bigtable.v1.SampleRowKeysResponse\"O\x82\xd3\xe4\x93\x02I\x12G/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys0\x01\x12\xa3\x01\n\tMutateRow\x12$.google.bigtable.v1.MutateRowRequest\x1a\x16.google.protobuf.Empty\"X\x82\xd3\xe4\x93\x02R\"M/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate:\x01*\x12\xaa\x01\n\nMutateRows\x12%.google.bigtable.v1.MutateRowsRequest\x1a&.google.bigtable.v1.MutateRowsResponse\"M\x82\xd3\xe4\x93\x02G\"B/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows:\x01*\x12\xd2\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v1.CheckAndMutateRowRequest\x1a-.google.bigtable.v1.CheckAndMutateRowResponse\"`\x82\xd3\xe4\x93\x02Z\"U/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate:\x01*\x12\xbf\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v1.ReadModifyWriteRowRequest\x1a\x17.google.bigtable.v1.Row\"a\x82\xd3\xe4\x93\x02[\"V/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite:\x01*B4\n\x16\x63om.google.bigtable.v1B\x15\x42igtableServicesProtoP\x01\x88\x01\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\025BigtableServicesProtoP\001\210\001\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaBigtableServiceServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ReadRows(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def SampleRowKeys(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def MutateRow(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def MutateRows(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CheckAndMutateRow(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ReadModifyWriteRow(self, request, context):
raise NotImplementedError()
class BetaBigtableServiceStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ReadRows(self, request, timeout):
raise NotImplementedError()
@abc.abstractmethod
def SampleRowKeys(self, request, timeout):
raise NotImplementedError()
@abc.abstractmethod
def MutateRow(self, request, timeout):
raise NotImplementedError()
MutateRow.future = None
@abc.abstractmethod
def MutateRows(self, request, timeout):
raise NotImplementedError()
MutateRows.future = None
@abc.abstractmethod
def CheckAndMutateRow(self, request, timeout):
raise NotImplementedError()
CheckAndMutateRow.future = None
@abc.abstractmethod
def ReadModifyWriteRow(self, request, timeout):
raise NotImplementedError()
ReadModifyWriteRow.future = None
def beta_create_BigtableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  import gcloud.bigtable._generated.bigtable_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_data_pb2
  import google.protobuf.empty_pb2
request_deserializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.FromString,
('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.FromString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.FromString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.FromString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.FromString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.FromString,
}
response_serializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.SerializeToString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.SerializeToString,
}
method_implementations = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow),
('google.bigtable.v1.BigtableService', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow),
('google.bigtable.v1.BigtableService', 'MutateRows'): face_utilities.unary_unary_inline(servicer.MutateRows),
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow),
('google.bigtable.v1.BigtableService', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows),
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_BigtableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  import gcloud.bigtable._generated.bigtable_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_data_pb2
  import google.protobuf.empty_pb2
request_serializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.SerializeToString,
}
response_deserializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.FromString,
('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.FromString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.FromString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.FromString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.FromString,
}
cardinalities = {
'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY,
'MutateRow': cardinality.Cardinality.UNARY_UNARY,
'MutateRows': cardinality.Cardinality.UNARY_UNARY,
'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY,
'ReadRows': cardinality.Cardinality.UNARY_STREAM,
'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.bigtable.v1.BigtableService', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
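A minimal client-side sketch (editorial, with assumed endpoint details) of how the stub factory above is used: an insecure localhost channel stands in for the authenticated channel that the gcloud client would normally build for the Bigtable endpoint.

from grpc.beta import implementations as beta_implementations
from gcloud.bigtable._generated import bigtable_service_pb2
from gcloud.bigtable._generated import bigtable_service_messages_pb2 as messages_pb2

# Hypothetical endpoint, for illustration only.
channel = beta_implementations.insecure_channel('localhost', 50051)
stub = bigtable_service_pb2.beta_create_BigtableService_stub(channel)

request = messages_pb2.ReadRowsRequest(
    table_name='projects/p/zones/z/clusters/c/tables/t')
# ReadRows is unary-stream (see the cardinalities mapping above), so the call
# returns an iterator of ReadRowsResponse messages.
for response in stub.ReadRows(request, 10):
    for chunk in response.chunks:
        print(chunk.WhichOneof('chunk'))  # 'row_contents', 'reset_row' or 'commit_row'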

View file

@@ -0,0 +1,377 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_data.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_data.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\n8google/bigtable/admin/table/v1/bigtable_table_data.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xfd\x02\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x11\x63urrent_operation\x18\x02 \x01(\x0b\x32\x1d.google.longrunning.Operation\x12R\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x39.google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry\x12O\n\x0bgranularity\x18\x04 \x01(\x0e\x32:.google.bigtable.admin.table.v1.Table.TimestampGranularity\x1a\x63\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily:\x02\x38\x01\"\"\n\x14TimestampGranularity\x12\n\n\x06MILLIS\x10\x00\"l\n\x0c\x43olumnFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rgc_expression\x18\x02 \x01(\t\x12\x37\n\x07gc_rule\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\"\xed\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12K\n\x0cintersection\x18\x03 \x01(\x0b\x32\x33.google.bigtable.admin.table.v1.GcRule.IntersectionH\x00\x12=\n\x05union\x18\x04 \x01(\x0b\x32,.google.bigtable.admin.table.v1.GcRule.UnionH\x00\x1a\x45\n\x0cIntersection\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\x1a>\n\x05Union\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRuleB\x06\n\x04ruleB>\n\"com.google.bigtable.admin.table.v1B\x16\x42igtableTableDataProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor(
name='TimestampGranularity',
full_name='google.bigtable.admin.table.v1.Table.TimestampGranularity',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MILLIS', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=509,
serialized_end=543,
)
_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY)
_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor(
name='ColumnFamiliesEntry',
full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=408,
serialized_end=507,
)
_TABLE = _descriptor.Descriptor(
name='Table',
full_name='google.bigtable.admin.table.v1.Table',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.Table.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_operation', full_name='google.bigtable.admin.table.v1.Table.current_operation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_families', full_name='google.bigtable.admin.table.v1.Table.column_families', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='granularity', full_name='google.bigtable.admin.table.v1.Table.granularity', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TABLE_COLUMNFAMILIESENTRY, ],
enum_types=[
_TABLE_TIMESTAMPGRANULARITY,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=543,
)
_COLUMNFAMILY = _descriptor.Descriptor(
name='ColumnFamily',
full_name='google.bigtable.admin.table.v1.ColumnFamily',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.ColumnFamily.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gc_expression', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_expression', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gc_rule', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_rule', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=545,
serialized_end=653,
)
_GCRULE_INTERSECTION = _descriptor.Descriptor(
name='Intersection',
full_name='google.bigtable.admin.table.v1.GcRule.Intersection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Intersection.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=880,
serialized_end=949,
)
_GCRULE_UNION = _descriptor.Descriptor(
name='Union',
full_name='google.bigtable.admin.table.v1.GcRule.Union',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Union.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=951,
serialized_end=1013,
)
_GCRULE = _descriptor.Descriptor(
name='GcRule',
full_name='google.bigtable.admin.table.v1.GcRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_num_versions', full_name='google.bigtable.admin.table.v1.GcRule.max_num_versions', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_age', full_name='google.bigtable.admin.table.v1.GcRule.max_age', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='intersection', full_name='google.bigtable.admin.table.v1.GcRule.intersection', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='union', full_name='google.bigtable.admin.table.v1.GcRule.union', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='rule', full_name='google.bigtable.admin.table.v1.GcRule.rule',
index=0, containing_type=None, fields=[]),
],
serialized_start=656,
serialized_end=1021,
)
_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY
_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE
_TABLE.fields_by_name['current_operation'].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION
_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY
_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY
_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE
_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE
_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE
_GCRULE_INTERSECTION.containing_type = _GCRULE
_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE
_GCRULE_UNION.containing_type = _GCRULE
_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION
_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['max_num_versions'])
_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['max_age'])
_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['intersection'])
_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['union'])
_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule']
DESCRIPTOR.message_types_by_name['Table'] = _TABLE
DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY
DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE
Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict(
ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict(
DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry)
))
,
DESCRIPTOR = _TABLE,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table)
))
_sym_db.RegisterMessage(Table)
_sym_db.RegisterMessage(Table.ColumnFamiliesEntry)
ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict(
DESCRIPTOR = _COLUMNFAMILY,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ColumnFamily)
))
_sym_db.RegisterMessage(ColumnFamily)
GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict(
Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict(
DESCRIPTOR = _GCRULE_INTERSECTION,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Intersection)
))
,
Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict(
DESCRIPTOR = _GCRULE_UNION,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Union)
))
,
DESCRIPTOR = _GCRULE,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule)
))
_sym_db.RegisterMessage(GcRule)
_sym_db.RegisterMessage(GcRule.Intersection)
_sym_db.RegisterMessage(GcRule.Union)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\026BigtableTableDataProtoP\001')
_TABLE_COLUMNFAMILIESENTRY.has_options = True
_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
# @@protoc_insertion_point(module_scope)
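A short illustrative sketch (not part of the generated module) of the admin data types defined above: garbage-collection rules combine through the "rule" oneof, and column_families is a map keyed by family name (this assumes a protobuf runtime with map support, which the map-entry option above relies on). All field values are invented.

from gcloud.bigtable._generated import bigtable_table_data_pb2 as table_data_pb2
from google.protobuf import duration_pb2

keep_two = table_data_pb2.GcRule(max_num_versions=2)
keep_week = table_data_pb2.GcRule(
    max_age=duration_pb2.Duration(seconds=7 * 24 * 3600))
either = table_data_pb2.GcRule(
    union=table_data_pb2.GcRule.Union(rules=[keep_two, keep_week]))

family = table_data_pb2.ColumnFamily(name='cf1', gc_rule=either)
table = table_data_pb2.Table(name='my-table')
table.column_families['cf1'].CopyFrom(family)  # map<string, ColumnFamily> access
print(table.column_families['cf1'].gc_rule.WhichOneof('rule'))  # -> 'union'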

View file

@@ -0,0 +1,389 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_service_messages.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_service_messages.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\nDgoogle/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\"\x86\x01\n\x12\x43reateTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x34\n\x05table\x18\x03 \x01(\x0b\x32%.google.bigtable.admin.table.v1.Table\x12\x1a\n\x12initial_split_keys\x18\x04 \x03(\t\"!\n\x11ListTablesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"K\n\x12ListTablesResponse\x12\x35\n\x06tables\x18\x01 \x03(\x0b\x32%.google.bigtable.admin.table.v1.Table\"\x1f\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"2\n\x12RenameTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06new_id\x18\x02 \x01(\t\"\x88\x01\n\x19\x43reateColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_family_id\x18\x02 \x01(\t\x12\x43\n\rcolumn_family\x18\x03 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily\")\n\x19\x44\x65leteColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\tBI\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CREATETABLEREQUEST = _descriptor.Descriptor(
name='CreateTableRequest',
full_name='google.bigtable.admin.table.v1.CreateTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.CreateTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table_id', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='initial_split_keys', full_name='google.bigtable.admin.table.v1.CreateTableRequest.initial_split_keys', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=297,
)
_LISTTABLESREQUEST = _descriptor.Descriptor(
name='ListTablesRequest',
full_name='google.bigtable.admin.table.v1.ListTablesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.ListTablesRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=299,
serialized_end=332,
)
_LISTTABLESRESPONSE = _descriptor.Descriptor(
name='ListTablesResponse',
full_name='google.bigtable.admin.table.v1.ListTablesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tables', full_name='google.bigtable.admin.table.v1.ListTablesResponse.tables', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=334,
serialized_end=409,
)
_GETTABLEREQUEST = _descriptor.Descriptor(
name='GetTableRequest',
full_name='google.bigtable.admin.table.v1.GetTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.GetTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=411,
serialized_end=442,
)
_DELETETABLEREQUEST = _descriptor.Descriptor(
name='DeleteTableRequest',
full_name='google.bigtable.admin.table.v1.DeleteTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.DeleteTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=444,
serialized_end=478,
)
_RENAMETABLEREQUEST = _descriptor.Descriptor(
name='RenameTableRequest',
full_name='google.bigtable.admin.table.v1.RenameTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.RenameTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_id', full_name='google.bigtable.admin.table.v1.RenameTableRequest.new_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=480,
serialized_end=530,
)
_CREATECOLUMNFAMILYREQUEST = _descriptor.Descriptor(
name='CreateColumnFamilyRequest',
full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_family_id', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_family', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=533,
serialized_end=669,
)
_DELETECOLUMNFAMILYREQUEST = _descriptor.Descriptor(
name='DeleteColumnFamilyRequest',
full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=671,
serialized_end=712,
)
_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE
_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE
_CREATECOLUMNFAMILYREQUEST.fields_by_name['column_family'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._COLUMNFAMILY
DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST
DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST
DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE
DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST
DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST
DESCRIPTOR.message_types_by_name['RenameTableRequest'] = _RENAMETABLEREQUEST
DESCRIPTOR.message_types_by_name['CreateColumnFamilyRequest'] = _CREATECOLUMNFAMILYREQUEST
DESCRIPTOR.message_types_by_name['DeleteColumnFamilyRequest'] = _DELETECOLUMNFAMILYREQUEST
CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateTableRequest)
))
_sym_db.RegisterMessage(CreateTableRequest)
ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesRequest)
))
_sym_db.RegisterMessage(ListTablesRequest)
ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESRESPONSE,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesResponse)
))
_sym_db.RegisterMessage(ListTablesResponse)
GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict(
DESCRIPTOR = _GETTABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GetTableRequest)
))
_sym_db.RegisterMessage(GetTableRequest)
DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteTableRequest)
))
_sym_db.RegisterMessage(DeleteTableRequest)
RenameTableRequest = _reflection.GeneratedProtocolMessageType('RenameTableRequest', (_message.Message,), dict(
DESCRIPTOR = _RENAMETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.RenameTableRequest)
))
_sym_db.RegisterMessage(RenameTableRequest)
CreateColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('CreateColumnFamilyRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATECOLUMNFAMILYREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateColumnFamilyRequest)
))
_sym_db.RegisterMessage(CreateColumnFamilyRequest)
DeleteColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('DeleteColumnFamilyRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETECOLUMNFAMILYREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteColumnFamilyRequest)
))
_sym_db.RegisterMessage(DeleteColumnFamilyRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\001')
# @@protoc_insertion_point(module_scope)
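A minimal usage sketch (not part of the generated module above): it shows how the request classes registered by this file can be constructed and round-tripped through the wire format. The project, cluster, and table names are placeholders.

from gcloud.bigtable._generated import (
    bigtable_table_service_messages_pb2 as table_messages)

# Hypothetical resource names; any valid cluster/table path would do.
rename_request = table_messages.RenameTableRequest(
    name=('projects/my-project/zones/my-zone/'
          'clusters/my-cluster/tables/old-table'),
    new_id='new-table',
)
wire_bytes = rename_request.SerializeToString()
parsed = table_messages.RenameTableRequest.FromString(wire_bytes)
assert parsed.new_id == 'new-table'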

View file

@@ -0,0 +1,203 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2
from gcloud.bigtable._generated import bigtable_table_service_messages_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_service.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\n;google/bigtable/admin/table/v1/bigtable_table_service.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\x1a\x44google/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\x89\x0b\n\x14\x42igtableTableService\x12\xa4\x01\n\x0b\x43reateTable\x12\x32.google.bigtable.admin.table.v1.CreateTableRequest\x1a%.google.bigtable.admin.table.v1.Table\":\x82\xd3\xe4\x93\x02\x34\"//v1/{name=projects/*/zones/*/clusters/*}/tables:\x01*\x12\xac\x01\n\nListTables\x12\x31.google.bigtable.admin.table.v1.ListTablesRequest\x1a\x32.google.bigtable.admin.table.v1.ListTablesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{name=projects/*/zones/*/clusters/*}/tables\x12\x9d\x01\n\x08GetTable\x12/.google.bigtable.admin.table.v1.GetTableRequest\x1a%.google.bigtable.admin.table.v1.Table\"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x94\x01\n\x0b\x44\x65leteTable\x12\x32.google.bigtable.admin.table.v1.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"9\x82\xd3\xe4\x93\x02\x33*1/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x9e\x01\n\x0bRenameTable\x12\x32.google.bigtable.admin.table.v1.RenameTableRequest\x1a\x16.google.protobuf.Empty\"C\x82\xd3\xe4\x93\x02=\"8/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename:\x01*\x12\xca\x01\n\x12\x43reateColumnFamily\x12\x39.google.bigtable.admin.table.v1.CreateColumnFamilyRequest\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"K\x82\xd3\xe4\x93\x02\x45\"@/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies:\x01*\x12\xbf\x01\n\x12UpdateColumnFamily\x12,.google.bigtable.admin.table.v1.ColumnFamily\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"M\x82\xd3\xe4\x93\x02G\x1a\x42/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}:\x01*\x12\xb3\x01\n\x12\x44\x65leteColumnFamily\x12\x39.google.bigtable.admin.table.v1.DeleteColumnFamilyRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44*B/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}BB\n\"com.google.bigtable.admin.table.v1B\x1a\x42igtableTableServicesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\032BigtableTableServicesProtoP\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaBigtableTableServiceServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def CreateTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ListTables(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def GetTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def RenameTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CreateColumnFamily(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def UpdateColumnFamily(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteColumnFamily(self, request, context):
raise NotImplementedError()
class BetaBigtableTableServiceStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def CreateTable(self, request, timeout):
raise NotImplementedError()
CreateTable.future = None
@abc.abstractmethod
def ListTables(self, request, timeout):
raise NotImplementedError()
ListTables.future = None
@abc.abstractmethod
def GetTable(self, request, timeout):
raise NotImplementedError()
GetTable.future = None
@abc.abstractmethod
def DeleteTable(self, request, timeout):
raise NotImplementedError()
DeleteTable.future = None
@abc.abstractmethod
def RenameTable(self, request, timeout):
raise NotImplementedError()
RenameTable.future = None
@abc.abstractmethod
def CreateColumnFamily(self, request, timeout):
raise NotImplementedError()
CreateColumnFamily.future = None
@abc.abstractmethod
def UpdateColumnFamily(self, request, timeout):
raise NotImplementedError()
UpdateColumnFamily.future = None
@abc.abstractmethod
def DeleteColumnFamily(self, request, timeout):
raise NotImplementedError()
DeleteColumnFamily.future = None
def beta_create_BigtableTableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import google.protobuf.empty_pb2
request_deserializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
}
response_serializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
}
method_implementations = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): face_utilities.unary_unary_inline(servicer.CreateColumnFamily),
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): face_utilities.unary_unary_inline(servicer.DeleteColumnFamily),
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables),
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): face_utilities.unary_unary_inline(servicer.RenameTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): face_utilities.unary_unary_inline(servicer.UpdateColumnFamily),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_BigtableTableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import google.protobuf.empty_pb2
request_serializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
}
response_deserializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
}
cardinalities = {
'CreateColumnFamily': cardinality.Cardinality.UNARY_UNARY,
'CreateTable': cardinality.Cardinality.UNARY_UNARY,
'DeleteColumnFamily': cardinality.Cardinality.UNARY_UNARY,
'DeleteTable': cardinality.Cardinality.UNARY_UNARY,
'GetTable': cardinality.Cardinality.UNARY_UNARY,
'ListTables': cardinality.Cardinality.UNARY_UNARY,
'RenameTable': cardinality.Cardinality.UNARY_UNARY,
'UpdateColumnFamily': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.table.v1.BigtableTableService', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
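A sketch of how the beta stub factory defined above might be wired to a channel. The endpoint, the use of an insecure channel, and the 10-second timeout are illustrative assumptions rather than values taken from this commit.

from grpc.beta import implementations as beta_implementations

from gcloud.bigtable._generated import (
    bigtable_table_service_messages_pb2 as table_messages)
from gcloud.bigtable._generated import (
    bigtable_table_service_pb2 as table_service)

# Hypothetical local endpoint; the real service requires a secure,
# authenticated channel.
channel = beta_implementations.insecure_channel('localhost', 8086)
stub = table_service.beta_create_BigtableTableService_stub(channel)

list_request = table_messages.ListTablesRequest(
    name='projects/my-project/zones/my-zone/clusters/my-cluster')
response = stub.ListTables(list_request, 10)  # per-call timeout in seconds
for table in response.tables:
    print(table.name)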

View file

@@ -0,0 +1,100 @@
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaOperationsServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetOperation(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ListOperations(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CancelOperation(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteOperation(self, request, context):
raise NotImplementedError()
class BetaOperationsStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetOperation(self, request, timeout):
raise NotImplementedError()
GetOperation.future = None
@abc.abstractmethod
def ListOperations(self, request, timeout):
raise NotImplementedError()
ListOperations.future = None
@abc.abstractmethod
def CancelOperation(self, request, timeout):
raise NotImplementedError()
CancelOperation.future = None
@abc.abstractmethod
def DeleteOperation(self, request, timeout):
raise NotImplementedError()
DeleteOperation.future = None
def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  import google.longrunning.operations_pb2
  import google.protobuf.empty_pb2
request_deserializers = {
('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.FromString,
('google.longrunning.Operations', 'DeleteOperation'): google.longrunning.operations_pb2.DeleteOperationRequest.FromString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.FromString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.FromString,
}
response_serializers = {
('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.SerializeToString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.SerializeToString,
}
method_implementations = {
('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation),
('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation),
('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation),
('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  import google.longrunning.operations_pb2
  import google.protobuf.empty_pb2
request_serializers = {
('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.SerializeToString,
('google.longrunning.Operations', 'DeleteOperation'): google.longrunning.operations_pb2.DeleteOperationRequest.SerializeToString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.SerializeToString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.SerializeToString,
}
response_deserializers = {
('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.FromString,
('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.FromString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.FromString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.FromString,
}
cardinalities = {
'CancelOperation': cardinality.Cardinality.UNARY_UNARY,
'DeleteOperation': cardinality.Cardinality.UNARY_UNARY,
'GetOperation': cardinality.Cardinality.UNARY_UNARY,
'ListOperations': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options)
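As a sketch, the Operations stub factory above could be used to poll a long-running operation. The module name (operations_grpc_pb2), the channel endpoint, and the operation name are assumptions made for illustration.

from grpc.beta import implementations as beta_implementations
import google.longrunning.operations_pb2 as operations_pb2

from gcloud.bigtable._generated import operations_grpc_pb2  # module name assumed

channel = beta_implementations.insecure_channel('localhost', 8086)  # placeholder endpoint
operations_stub = operations_grpc_pb2.beta_create_Operations_stub(channel)

get_request = operations_pb2.GetOperationRequest(name='operations/some-operation-id')
operation = operations_stub.GetOperation(get_request, 10)  # per-call timeout in seconds
print(operation.done)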

View file

@@ -0,0 +1,15 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated protobuf modules for Google Cloud Bigtable API."""

View file

@@ -0,0 +1,321 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v2;
import "google/api/annotations.proto";
import "google/bigtable/v2/data.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableProto";
option java_package = "com.google.bigtable.v2";
// Service for reading from and writing to existing Bigtable tables.
service Bigtable {
// Streams back the contents of all requested rows, optionally
// applying the same Reader filter to each. Depending on their size,
// rows and cells may be broken up across multiple responses, but
// atomicity of each row will still be preserved. See the
// ReadRowsResponse documentation for details.
rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" };
}
// Returns a sample of row keys in the table. The returned row keys will
// delimit contiguous sections of the table of approximately equal size,
// which can be used to break up the data for distributed tasks like
// mapreduces.
rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" };
}
// Mutates a row atomically. Cells already present in the row are left
// unchanged unless explicitly changed by `mutation`.
rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) {
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" };
}
// Mutates multiple rows in a batch. Each individual row is mutated
// atomically as in MutateRow, but the entire batch is not executed
// atomically.
rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) {
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" };
}
// Mutates a row atomically based on the output of a predicate Reader filter.
rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" };
}
// Modifies a row atomically. The method reads the latest existing timestamp
// and value from the specified columns and writes a new entry based on
// pre-defined read/modify/write rules. The new value for the timestamp is the
// greater of the existing timestamp or the current server time. The method
// returns the new contents of all modified cells.
rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) {
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" };
}
}
// Request message for Bigtable.ReadRows.
message ReadRowsRequest {
// The unique name of the table from which to read.
// Values are of the form
  // projects/<project>/instances/<instance>/tables/<table>
string table_name = 1;
// The row keys and/or ranges to read. If not specified, reads from all rows.
RowSet rows = 2;
// The filter to apply to the contents of the specified row(s). If unset,
// reads the entirety of each row.
RowFilter filter = 3;
// The read will terminate after committing to N rows' worth of results. The
// default (zero) is to return all results.
int64 rows_limit = 4;
}
// Response message for Bigtable.ReadRows.
message ReadRowsResponse {
// Specifies a piece of a row's contents returned as part of the read
// response stream.
message CellChunk {
// The row key for this chunk of data. If the row key is empty,
// this CellChunk is a continuation of the same row as the previous
// CellChunk in the response stream, even if that CellChunk was in a
// previous ReadRowsResponse message.
bytes row_key = 1;
// The column family name for this chunk of data. If this message
// is not present this CellChunk is a continuation of the same column
// family as the previous CellChunk. The empty string can occur as a
// column family name in a response so clients must check
// explicitly for the presence of this message, not just for
// `family_name.value` being non-empty.
google.protobuf.StringValue family_name = 2;
// The column qualifier for this chunk of data. If this message
// is not present, this CellChunk is a continuation of the same column
// as the previous CellChunk. Column qualifiers may be empty so
// clients must check for the presence of this message, not just
// for `qualifier.value` being non-empty.
google.protobuf.BytesValue qualifier = 3;
// The cell's stored timestamp, which also uniquely identifies it
// within its column. Values are always expressed in
// microseconds, but individual tables may set a coarser
// granularity to further restrict the allowed values. For
// example, a table which specifies millisecond granularity will
// only allow values of `timestamp_micros` which are multiples of
// 1000. Timestamps are only set in the first CellChunk per cell
// (for cells split into multiple chunks).
int64 timestamp_micros = 4;
// Labels applied to the cell by a
// [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
// on the first CellChunk per cell.
repeated string labels = 5;
// The value stored in the cell. Cell values can be split across
// multiple CellChunks. In that case only the value field will be
// set in CellChunks after the first: the timestamp and labels
// will only be present in the first CellChunk, even if the first
// CellChunk came in a previous ReadRowsResponse.
bytes value = 6;
// If this CellChunk is part of a chunked cell value and this is
// not the final chunk of that cell, value_size will be set to the
// total length of the cell value. The client can use this size
// to pre-allocate memory to hold the full cell value.
int32 value_size = 7;
oneof row_status {
// Indicates that the client should drop all previous chunks for
// `row_key`, as it will be re-read from the beginning.
bool reset_row = 8;
// Indicates that the client can safely process all previous chunks for
// `row_key`, as its data has been fully read.
bool commit_row = 9;
}
}
repeated CellChunk chunks = 1;
// Optionally the server might return the row key of the last row it
// has scanned. The client can use this to construct a more
// efficient retry request if needed: any row keys or portions of
// ranges less than this row key can be dropped from the request.
// This is primarily useful for cases where the server has read a
// lot of data that was filtered out since the last committed row
// key, allowing the client to skip that work on a retry.
bytes last_scanned_row_key = 2;
}
// Request message for Bigtable.SampleRowKeys.
message SampleRowKeysRequest {
// The unique name of the table from which to sample row keys.
// Values are of the form
  // projects/<project>/instances/<instance>/tables/<table>
string table_name = 1;
}
// Response message for Bigtable.SampleRowKeys.
message SampleRowKeysResponse {
// Sorted streamed sequence of sample row keys in the table. The table might
// have contents before the first row key in the list and after the last one,
// but a key containing the empty string indicates "end of table" and will be
// the last response given, if present.
// Note that row keys in this list may not have ever been written to or read
// from, and users should therefore not make any assumptions about the row key
// structure that are specific to their use case.
bytes row_key = 1;
// Approximate total storage space used by all rows in the table which precede
// `row_key`. Buffering the contents of all rows between two subsequent
// samples would require space roughly equal to the difference in their
// `offset_bytes` fields.
int64 offset_bytes = 2;
}
// Request message for Bigtable.MutateRow.
message MutateRowRequest {
// The unique name of the table to which the mutation should be applied.
// Values are of the form
  // projects/<project>/instances/<instance>/tables/<table>
string table_name = 1;
// The key of the row to which the mutation should be applied.
bytes row_key = 2;
// Changes to be atomically applied to the specified row. Entries are applied
// in order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry and at most 100000.
repeated Mutation mutations = 3;
}
// Response message for Bigtable.MutateRow.
message MutateRowResponse {
}
// Request message for BigtableService.MutateRows.
message MutateRowsRequest {
message Entry {
// The key of the row to which the `mutations` should be applied.
bytes row_key = 1;
// Changes to be atomically applied to the specified row. Mutations are
// applied in order, meaning that earlier mutations can be masked by
// later ones.
// You must specify at least one mutation.
repeated Mutation mutations = 2;
}
// The unique name of the table to which the mutations should be applied.
string table_name = 1;
// The row keys and corresponding mutations to be applied in bulk.
// Each entry is applied as an atomic mutation, but the entries may be
// applied in arbitrary order (even between entries for the same row).
// At least one entry must be specified, and in total the entries can
// contain at most 100000 mutations.
repeated Entry entries = 2;
}
// Response message for BigtableService.MutateRows.
message MutateRowsResponse {
message Entry {
// The index into the original request's `entries` list of the Entry
// for which a result is being reported.
int64 index = 1;
// The result of the request Entry identified by `index`.
// Depending on how requests are batched during execution, it is possible
// for one Entry to fail due to an error with another Entry. In the event
// that this occurs, the same error will be reported for both entries.
google.rpc.Status status = 2;
}
// One or more results for Entries from the batch request.
repeated Entry entries = 1;
}
// Request message for Bigtable.CheckAndMutateRow.
message CheckAndMutateRowRequest {
// The unique name of the table to which the conditional mutation should be
// applied.
// Values are of the form
  // projects/<project>/instances/<instance>/tables/<table>
string table_name = 1;
// The key of the row to which the conditional mutation should be applied.
bytes row_key = 2;
// The filter to be applied to the contents of the specified row. Depending
// on whether or not any results are yielded, either `true_mutations` or
// `false_mutations` will be executed. If unset, checks that the row contains
// any values at all.
RowFilter predicate_filter = 6;
// Changes to be atomically applied to the specified row if `predicate_filter`
// yields at least one cell when applied to `row_key`. Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if `false_mutations` is empty, and at most
// 100000.
repeated Mutation true_mutations = 4;
// Changes to be atomically applied to the specified row if `predicate_filter`
// does not yield any cells when applied to `row_key`. Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if `true_mutations` is empty, and at most
// 100000.
repeated Mutation false_mutations = 5;
}
// Response message for Bigtable.CheckAndMutateRow.
message CheckAndMutateRowResponse {
// Whether or not the request's `predicate_filter` yielded any results for
// the specified row.
bool predicate_matched = 1;
}
// Request message for Bigtable.ReadModifyWriteRow.
message ReadModifyWriteRowRequest {
// The unique name of the table to which the read/modify/write rules should be
// applied.
// Values are of the form
  // projects/<project>/instances/<instance>/tables/<table>
string table_name = 1;
// The key of the row to which the read/modify/write rules should be applied.
bytes row_key = 2;
// Rules specifying how the specified row's contents are to be transformed
// into writes. Entries are applied in order, meaning that earlier rules will
// affect the results of later ones.
repeated ReadModifyWriteRule rules = 3;
}
// Response message for Bigtable.ReadModifyWriteRow.
message ReadModifyWriteRowResponse {
// A Row containing the new contents of all cells modified by the request.
Row row = 1;
}
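The CellChunk comments above describe how row contents are split and continued across the response stream. Below is a minimal Python sketch (not this library's actual reader) of folding such a stream back into rows; it assumes generated bigtable_pb2 message objects and ignores cell values that are themselves split across multiple chunks.

def merge_read_rows(responses):
    """Yield (row_key, cells) from an iterable of ReadRowsResponse messages."""
    row_key, cells = None, []
    family, qualifier = None, None
    for response in responses:
        for chunk in response.chunks:
            if chunk.reset_row:
                # Discard everything buffered for this row; it will be re-read.
                row_key, cells = None, []
                continue
            if chunk.row_key:
                row_key = chunk.row_key
            if chunk.HasField('family_name'):   # absent => same family as before
                family = chunk.family_name.value
            if chunk.HasField('qualifier'):     # absent => same column as before
                qualifier = chunk.qualifier.value
            cells.append((family, qualifier, chunk.timestamp_micros, chunk.value))
            if chunk.commit_row:
                yield row_key, cells
                row_key, cells = None, []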

View file

@@ -0,0 +1,232 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.v2;
import "google/api/annotations.proto";
import "google/bigtable/admin/v2/instance.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableInstanceAdminProto";
option java_package = "com.google.bigtable.admin.v2";
// Service for creating, configuring, and deleting Cloud Bigtable Instances and
// Clusters. Provides access to the Instance and Cluster schemas only, not the
// tables metadata or data stored in those tables.
service BigtableInstanceAdmin {
// Create an instance within a project.
rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) {
option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" };
}
// Gets information about an instance.
rpc GetInstance(GetInstanceRequest) returns (Instance) {
option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" };
}
// Lists information about instances in a project.
rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" };
}
// Updates an instance within a project.
rpc UpdateInstance(Instance) returns (Instance) {
option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" };
}
// Delete an instance from a project.
rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" };
}
// Creates a cluster within an instance.
rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" };
}
// Gets information about a cluster.
rpc GetCluster(GetClusterRequest) returns (Cluster) {
option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" };
}
// Lists information about clusters in an instance.
rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" };
}
// Updates a cluster within an instance.
rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) {
option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" };
}
// Deletes a cluster from an instance.
rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" };
}
}
// Request message for BigtableInstanceAdmin.CreateInstance.
message CreateInstanceRequest {
// The unique name of the project in which to create the new instance.
// Values are of the form projects/<project>
string parent = 1;
// The id to be used when referring to the new instance within its project,
// e.g. just the "myinstance" section of the full name
// "projects/myproject/instances/myinstance"
string instance_id = 2;
// The instance to create.
// Fields marked "@OutputOnly" must be left blank.
Instance instance = 3;
// The clusters to be created within the instance, mapped by desired
// cluster ID (e.g. just the "mycluster" part of the full name
// "projects/myproject/instances/myinstance/clusters/mycluster").
// Fields marked "@OutputOnly" must be left blank.
// Currently exactly one cluster must be specified.
map<string, Cluster> clusters = 4;
}
// Request message for BigtableInstanceAdmin.GetInstance.
message GetInstanceRequest {
// The unique name of the requested instance. Values are of the form
// projects/<project>/instances/<instance>
string name = 1;
}
// Request message for BigtableInstanceAdmin.ListInstances.
message ListInstancesRequest {
// The unique name of the project for which a list of instances is requested.
// Values are of the form projects/<project>
string parent = 1;
// The value of `next_page_token` returned by a previous call.
string page_token = 2;
}
// Response message for BigtableInstanceAdmin.ListInstances.
message ListInstancesResponse {
// The list of requested instances.
repeated Instance instances = 1;
// Locations from which Instance information could not be retrieved,
// due to an outage or some other transient condition.
// Instances whose Clusters are all in one of the failed locations
// may be missing from 'instances', and Instances with at least one
// Cluster in a failed location may only have partial information returned.
repeated string failed_locations = 2;
// Set if not all instances could be returned in a single response.
// Pass this value to `page_token` in another request to get the next
// page of results.
string next_page_token = 3;
}
// Request message for BigtableInstanceAdmin.DeleteInstance.
message DeleteInstanceRequest {
// The unique name of the instance to be deleted.
// Values are of the form projects/<project>/instances/<instance>
string name = 1;
}
// Request message for BigtableInstanceAdmin.CreateCluster.
message CreateClusterRequest {
// The unique name of the instance in which to create the new cluster.
// Values are of the form
// projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*
string parent = 1;
// The id to be used when referring to the new cluster within its instance,
// e.g. just the "mycluster" section of the full name
// "projects/myproject/instances/myinstance/clusters/mycluster"
string cluster_id = 2;
// The cluster to be created.
// Fields marked "@OutputOnly" must be left blank.
Cluster cluster = 3;
}
// Request message for BigtableInstanceAdmin.GetCluster.
message GetClusterRequest {
// The unique name of the requested cluster. Values are of the form
// projects/<project>/instances/<instance>/clusters/<cluster>
string name = 1;
}
// Request message for BigtableInstanceAdmin.ListClusters.
message ListClustersRequest {
// The unique name of the instance for which a list of clusters is requested.
// Values are of the form projects/<project>/instances/<instance>
// Use <instance> = '-' to list Clusters for all Instances in a project,
// for example "projects/myproject/instances/-"
string parent = 1;
// The value of `next_page_token` returned by a previous call.
string page_token = 2;
}
// Response message for BigtableInstanceAdmin.ListClusters.
message ListClustersResponse {
// The list of requested clusters.
repeated Cluster clusters = 1;
// Locations from which Cluster information could not be retrieved,
// due to an outage or some other transient condition.
// Clusters from these locations may be missing from 'clusters',
// or may only have partial information returned.
repeated string failed_locations = 2;
// Set if not all clusters could be returned in a single response.
// Pass this value to `page_token` in another request to get the next
// page of results.
string next_page_token = 3;
}
// Request message for BigtableInstanceAdmin.DeleteCluster.
message DeleteClusterRequest {
// The unique name of the cluster to be deleted. Values are of the form
// projects/<project>/instances/<instance>/clusters/<cluster>
string name = 1;
}
// The metadata for the Operation returned by CreateInstance.
message CreateInstanceMetadata {
// The request that prompted the initiation of this CreateInstance operation.
CreateInstanceRequest original_request = 1;
// The time at which the original request was received.
google.protobuf.Timestamp request_time = 2;
// The time at which the operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 3;
}
// The metadata for the Operation returned by UpdateCluster.
message UpdateClusterMetadata {
// The request that prompted the initiation of this UpdateCluster operation.
Cluster original_request = 1;
// The time at which the original request was received.
google.protobuf.Timestamp request_time = 2;
// The time at which the operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 3;
}
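For illustration only, a CreateInstanceRequest as defined above could be built like this in Python; the generated module path (_generated_v2) and all resource names are assumptions, not values taken from this commit.

from gcloud.bigtable._generated_v2 import (              # hypothetical module path
    bigtable_instance_admin_pb2 as instance_admin_messages)
from gcloud.bigtable._generated_v2 import instance_pb2   # hypothetical module path

request = instance_admin_messages.CreateInstanceRequest(
    parent='projects/my-project',
    instance_id='my-instance',
    instance=instance_pb2.Instance(display_name='My Instance'),
)
# Map-valued fields are filled in by key; exactly one cluster is required.
cluster = request.clusters['my-cluster']
cluster.location = 'projects/my-project/locations/us-central1-b'
cluster.serve_nodes = 3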

View file

@@ -0,0 +1,195 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.v2;
import "google/api/annotations.proto";
import "google/bigtable/admin/v2/table.proto";
import "google/protobuf/empty.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableAdminProto";
option java_package = "com.google.bigtable.admin.v2";
// Service for creating, configuring, and deleting Cloud Bigtable tables.
// Provides access to the table schemas only, not the data stored within
// the tables.
service BigtableTableAdmin {
// Creates a new table in the specified instance.
// The table can be created with a full set of initial column families,
// specified in the request.
rpc CreateTable(CreateTableRequest) returns (Table) {
option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" };
}
// Lists all tables served from a specified instance.
rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" };
}
// Gets metadata information about the specified table.
rpc GetTable(GetTableRequest) returns (Table) {
option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" };
}
// Permanently deletes a specified table and all of its data.
rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" };
}
// Atomically performs a series of column family modifications
// on the specified table.
rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) {
option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" };
}
// Permanently drop/delete a row range from a specified table. The request can
// specify whether to delete all rows in a table, or only those that match a
// particular prefix.
rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" };
}
}
// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
message CreateTableRequest {
// An initial split point for a newly created table.
message Split {
// Row key to use as an initial tablet boundary.
bytes key = 1;
}
// The unique name of the instance in which to create the table.
// Values are of the form projects/<project>/instances/<instance>
string parent = 1;
// The name by which the new table should be referred to within the parent
// instance, e.g. "foobar" rather than "<parent>/tables/foobar".
string table_id = 2;
// The Table to create.
Table table = 3;
// The optional list of row keys that will be used to initially split the
// table into several tablets (Tablets are similar to HBase regions).
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
//
// Example:
// * Row keys := ["a", "apple", "custom", "customer_1", "customer_2",
// "other", "zz"]
// * initial_split_keys := ["apple", "customer_1", "customer_2", "other"]
// * Key assignment:
// - Tablet 1 [, apple) => {"a"}.
// - Tablet 2 [apple, customer_1) => {"apple", "custom"}.
// - Tablet 3 [customer_1, customer_2) => {"customer_1"}.
// - Tablet 4 [customer_2, other) => {"customer_2"}.
// - Tablet 5 [other, ) => {"other", "zz"}.
repeated Split initial_splits = 4;
}
// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
message DropRowRangeRequest {
// The unique name of the table on which to drop a range of rows.
// Values are of the form projects/<project>/instances/<instance>/tables/<table>
string name = 1;
oneof target {
// Delete all rows that start with this row key prefix. Prefix cannot be
// zero length.
bytes row_key_prefix = 2;
// Delete all rows in the table. Setting this to false is a no-op.
bool delete_all_data_from_table = 3;
}
}
// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
message ListTablesRequest {
// The unique name of the instance for which tables should be listed.
// Values are of the form projects/<project>/instances/<instance>
string parent = 1;
// The view to be applied to the returned tables' fields.
// Defaults to NAME_ONLY if unspecified (no others are currently supported).
Table.View view = 2;
// The value of `next_page_token` returned by a previous call.
string page_token = 3;
}
// Response message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
message ListTablesResponse {
// The tables present in the requested cluster.
repeated Table tables = 1;
// Set if not all tables could be returned in a single response.
// Pass this value to `page_token` in another request to get the next
// page of results.
string next_page_token = 2;
}
// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable]
message GetTableRequest {
// The unique name of the requested table.
// Values are of the form projects/<project>/instances/<instance>/tables/<table>
string name = 1;
// The view to be applied to the returned table's fields.
// Defaults to SCHEMA_ONLY if unspecified.
Table.View view = 2;
}
// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable]
message DeleteTableRequest {
// The unique name of the table to be deleted.
// Values are of the form projects/<project>/instances/<instance>/tables/<table>
string name = 1;
}
// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies]
message ModifyColumnFamiliesRequest {
// A create, update, or delete of a particular column family.
message Modification {
// The ID of the column family to be modified.
string id = 1;
oneof mod {
// Create a new column family with the specified schema, or fail if
// one already exists with the given ID.
ColumnFamily create = 2;
// Update an existing column family to the specified schema, or fail
// if no column family exists with the given ID.
ColumnFamily update = 3;
// Drop (delete) the column family with the given ID, or fail if no such
// family exists.
bool drop = 4;
}
}
// The unique name of the table whose families should be modified.
// Values are of the form projects/<project>/instances/<instance>/tables/<table>
string name = 1;
// Modifications to be atomically applied to the specified table's families.
// Entries are applied in order, meaning that earlier modifications can be
// masked by later ones (in the case of repeated updates to the same family,
// for example).
repeated Modification modifications = 2;
}
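A hedged sketch of building the ModifyColumnFamiliesRequest described above, exercising the Modification oneof; the generated module names and resource path are assumptions made for illustration.

from gcloud.bigtable._generated_v2 import (               # hypothetical module path
    bigtable_table_admin_pb2 as table_admin_messages)
from gcloud.bigtable._generated_v2 import table_pb2        # hypothetical module path

modification = table_admin_messages.ModifyColumnFamiliesRequest.Modification(
    id='cf1',
    create=table_pb2.ColumnFamily(),   # no GC rule set: keep all cell versions
)
request = table_admin_messages.ModifyColumnFamiliesRequest(
    name='projects/my-project/instances/my-instance/tables/my-table',
    modifications=[modification],
)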

View file

@@ -0,0 +1,37 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.v2;
import "google/api/annotations.proto";
import "google/protobuf/timestamp.proto";
option java_multiple_files = true;
option java_outer_classname = "CommonProto";
option java_package = "com.google.bigtable.admin.v2";
// Storage media types for persisting Bigtable data.
enum StorageType {
// The user did not specify a storage type.
STORAGE_TYPE_UNSPECIFIED = 0;
// Flash (SSD) storage should be used.
SSD = 1;
// Magnetic drive (HDD) storage should be used.
HDD = 2;
}

View file

@@ -0,0 +1,532 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v2;
option java_multiple_files = true;
option java_outer_classname = "DataProto";
option java_package = "com.google.bigtable.v2";
// Specifies the complete (requested) contents of a single row of a table.
// Rows which exceed 256MiB in size cannot be read in full.
message Row {
// The unique key which identifies this row within its table. This is the same
// key that's used to identify the row in, for example, a MutateRowRequest.
// May contain any non-empty byte string up to 4KiB in length.
bytes key = 1;
// May be empty, but only if the entire row is empty.
// The mutual ordering of column families is not specified.
repeated Family families = 2;
}
// Specifies (some of) the contents of a single row/column family intersection
// of a table.
message Family {
// The unique key which identifies this family within its row. This is the
// same key that's used to identify the family in, for example, a RowFilter
// which sets its "family_name_regex_filter" field.
// Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may
// produce cells in a sentinel family with an empty name.
// Must be no greater than 64 characters in length.
string name = 1;
// Must not be empty. Sorted in order of increasing "qualifier".
repeated Column columns = 2;
}
// Specifies (some of) the contents of a single row/column intersection of a
// table.
message Column {
// The unique key which identifies this column within its family. This is the
// same key that's used to identify the column in, for example, a RowFilter
// which sets its `column_qualifier_regex_filter` field.
// May contain any byte string, including the empty string, up to 16kiB in
// length.
bytes qualifier = 1;
// Must not be empty. Sorted in order of decreasing "timestamp_micros".
repeated Cell cells = 2;
}
// Specifies (some of) the contents of a single row/column/timestamp of a table.
message Cell {
// The cell's stored timestamp, which also uniquely identifies it within
// its column.
// Values are always expressed in microseconds, but individual tables may set
// a coarser granularity to further restrict the allowed values. For
// example, a table which specifies millisecond granularity will only allow
// values of `timestamp_micros` which are multiples of 1000.
int64 timestamp_micros = 1;
// The value stored in the cell.
// May contain any byte string, including the empty string, up to 100MiB in
// length.
bytes value = 2;
// Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter].
repeated string labels = 3;
}
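
To make the Row/Family/Column/Cell nesting concrete, a fully populated read result corresponds to a Python message roughly like the following sketch (the data_pb2 module path is assumed):

from gcloud.bigtable._generated_v2 import data_pb2

# One row, one family, one column, one cell; the timestamp is a multiple
# of 1000, so it is valid for a table with millisecond granularity.
row = data_pb2.Row(
    key=b'phone#4c410523',
    families=[data_pb2.Family(
        name='stats',
        columns=[data_pb2.Column(
            qualifier=b'connected_cell',
            cells=[data_pb2.Cell(timestamp_micros=1464810600000000,
                                 value=b'1')],
        )],
    )],
)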
// Specifies a contiguous range of rows.
message RowRange {
// The row key at which to start the range.
// If neither field is set, interpreted as the empty string, inclusive.
oneof start_key {
// Used when giving an inclusive lower bound for the range.
bytes start_key_closed = 1;
// Used when giving an exclusive lower bound for the range.
bytes start_key_open = 2;
}
// The row key at which to end the range.
// If neither field is set, interpreted as the infinite row key, exclusive.
oneof end_key {
// Used when giving an exclusive upper bound for the range.
bytes end_key_open = 3;
// Used when giving an inclusive upper bound for the range.
bytes end_key_closed = 4;

}
}
// Specifies a non-contiguous set of rows.
message RowSet {
// Single rows included in the set.
repeated bytes row_keys = 1;
// Contiguous row ranges included in the set.
repeated RowRange row_ranges = 2;
}
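
For example, a RowSet that names one row explicitly and adds a half-open key range could be built as follows (a sketch; module path assumed):

from gcloud.bigtable._generated_v2 import data_pb2

row_set = data_pb2.RowSet(
    row_keys=[b'phone#4c410523'],
    row_ranges=[data_pb2.RowRange(start_key_closed=b'phone#',
                                  end_key_open=b'phone$')],
)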
// Specifies a contiguous range of columns within a single column family.
// The range spans from <column_family>:<start_qualifier> to
// <column_family>:<end_qualifier>, where both bounds can be either
// inclusive or exclusive.
message ColumnRange {
// The name of the column family within which this range falls.
string family_name = 1;
// The column qualifier at which to start the range (within `column_family`).
// If neither field is set, interpreted as the empty string, inclusive.
oneof start_qualifier {
// Used when giving an inclusive lower bound for the range.
bytes start_qualifier_closed = 2;
// Used when giving an exclusive lower bound for the range.
bytes start_qualifier_open = 3;
}
// The column qualifier at which to end the range (within `column_family`).
// If neither field is set, interpreted as the infinite string, exclusive.
oneof end_qualifier {
// Used when giving an inclusive upper bound for the range.
bytes end_qualifier_closed = 4;
// Used when giving an exclusive upper bound for the range.
bytes end_qualifier_open = 5;
}
}
// Specifies a contiguous range of microsecond timestamps.
message TimestampRange {
// Inclusive lower bound. If left empty, interpreted as 0.
int64 start_timestamp_micros = 1;
// Exclusive upper bound. If left empty, interpreted as infinity.
int64 end_timestamp_micros = 2;
}
// Specifies a contiguous range of raw byte values.
message ValueRange {
// The value at which to start the range.
// If neither field is set, interpreted as the empty string, inclusive.
oneof start_value {
// Used when giving an inclusive lower bound for the range.
bytes start_value_closed = 1;
// Used when giving an exclusive lower bound for the range.
bytes start_value_open = 2;
}
// The value at which to end the range.
// If neither field is set, interpreted as the infinite string, exclusive.
oneof end_value {
// Used when giving an inclusive upper bound for the range.
bytes end_value_closed = 3;
// Used when giving an exclusive upper bound for the range.
bytes end_value_open = 4;
}
}
// Takes a row as input and produces an alternate view of the row based on
// specified rules. For example, a RowFilter might trim down a row to include
// just the cells from columns matching a given regular expression, or might
// return all the cells of a row but not their values. More complicated filters
// can be composed out of these components to express requests such as, "within
// every column of a particular family, give just the two most recent cells
// which are older than timestamp X."
//
// There are two broad categories of RowFilters (true filters and transformers),
// as well as two ways to compose simple filters into more complex ones
// (chains and interleaves). They work as follows:
//
// * True filters alter the input row by excluding some of its cells wholesale
// from the output row. An example of a true filter is the `value_regex_filter`,
// which excludes cells whose values don't match the specified pattern. All
// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
// important point to keep in mind is that `RE2(.)` is equivalent by default to
// `RE2([^\n])`, meaning that it does not match newlines. When attempting to
// match an arbitrary byte, you should therefore use the escape sequence `\C`,
// which may need to be further escaped as `\\C` in your client language.
//
// * Transformers alter the input row by changing the values of some of its
// cells in the output, without excluding them completely. Currently, the only
// supported transformer is the `strip_value_transformer`, which replaces every
// cell's value with the empty string.
//
// * Chains and interleaves are described in more detail in the
// RowFilter.Chain and RowFilter.Interleave documentation.
//
// The total serialized size of a RowFilter message must not
// exceed 4096 bytes, and RowFilters may not be nested within each other
// (in Chains or Interleaves) to a depth of more than 20.
message RowFilter {
// A RowFilter which sends rows through several RowFilters in sequence.
message Chain {
// The elements of "filters" are chained together to process the input row:
// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
// The full chain is executed atomically.
repeated RowFilter filters = 1;
}
// A RowFilter which sends each row to each of several component
// RowFilters and interleaves the results.
message Interleave {
// The elements of "filters" all process a copy of the input row, and the
// results are pooled, sorted, and combined into a single output row.
// If multiple cells are produced with the same column and timestamp,
// they will all appear in the output row in an unspecified mutual order.
// Consider the following example, with three filters:
//
// input row
// |
// -----------------------------------------------------
// | | |
// f(0) f(1) f(2)
// | | |
// 1: foo,bar,10,x foo,bar,10,z far,bar,7,a
// 2: foo,blah,11,z far,blah,5,x far,blah,5,x
// | | |
// -----------------------------------------------------
// |
// 1: foo,bar,10,z // could have switched with #2
// 2: foo,bar,10,x // could have switched with #1
// 3: foo,blah,11,z
// 4: far,bar,7,a
// 5: far,blah,5,x // identical to #6
// 6: far,blah,5,x // identical to #5
//
// All interleaved filters are executed atomically.
repeated RowFilter filters = 1;
}
// A RowFilter which evaluates one of two possible RowFilters, depending on
// whether or not a predicate RowFilter outputs any cells from the input row.
//
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
// true and false filters, which may lead to inconsistent or unexpected
// results. Additionally, Condition filters have poor performance, especially
// when filters are set for the false condition.
message Condition {
// If `predicate_filter` outputs any cells, then `true_filter` will be
// evaluated on the input row. Otherwise, `false_filter` will be evaluated.
RowFilter predicate_filter = 1;
// The filter to apply to the input row if `predicate_filter` returns any
// results. If not provided, no results will be returned in the true case.
RowFilter true_filter = 2;
// The filter to apply to the input row if `predicate_filter` does not
// return any results. If not provided, no results will be returned in the
// false case.
RowFilter false_filter = 3;
}
// Which of the possible RowFilter types to apply. If none are set, this
// RowFilter returns all cells in the input row.
oneof filter {
// Applies several RowFilters to the data in sequence, progressively
// narrowing the results.
Chain chain = 1;
// Applies several RowFilters to the data in parallel and combines the
// results.
Interleave interleave = 2;
// Applies one of two possible RowFilters to the data based on the output of
// a predicate RowFilter.
Condition condition = 3;
// ADVANCED USE ONLY.
// Hook for introspection into the RowFilter. Outputs all cells directly to
// the output of the read rather than to any parent filter. Consider the
// following example:
//
// Chain(
// FamilyRegex("A"),
// Interleave(
// All(),
// Chain(Label("foo"), Sink())
// ),
// QualifierRegex("B")
// )
//
// A,A,1,w
// A,B,2,x
// B,B,4,z
// |
// FamilyRegex("A")
// |
// A,A,1,w
// A,B,2,x
// |
// +------------+-------------+
// | |
// All() Label(foo)
// | |
// A,A,1,w A,A,1,w,labels:[foo]
// A,B,2,x A,B,2,x,labels:[foo]
// | |
// | Sink() --------------+
// | | |
// +------------+ x------+ A,A,1,w,labels:[foo]
// | A,B,2,x,labels:[foo]
// A,A,1,w |
// A,B,2,x |
// | |
// QualifierRegex("B") |
// | |
// A,B,2,x |
// | |
// +--------------------------------+
// |
// A,A,1,w,labels:[foo]
// A,B,2,x,labels:[foo] // could be switched
// A,B,2,x // could be switched
//
// Despite being excluded by the qualifier filter, a copy of every cell
// that reaches the sink is present in the final result.
//
// As with an [Interleave][google.bigtable.v2.RowFilter.Interleave],
// duplicate cells are possible, and appear in an unspecified mutual order.
// In this case we have a duplicate with column "A:B" and timestamp 2,
// because one copy passed through the all filter while the other was
// passed through the label and sink. Note that one copy has label "foo",
// while the other does not.
//
// Cannot be used within the `predicate_filter`, `true_filter`, or
// `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition].
bool sink = 16;
// Matches all cells, regardless of input. Functionally equivalent to
// leaving `filter` unset, but included for completeness.
bool pass_all_filter = 17;
// Does not match any cells, regardless of input. Useful for temporarily
// disabling just part of a filter.
bool block_all_filter = 18;
// Matches only cells from rows whose keys satisfy the given RE2 regex. In
// other words, passes through the entire row when the key matches, and
// otherwise produces an empty row.
// Note that, since row keys can contain arbitrary bytes, the `\C` escape
// sequence must be used if a true wildcard is desired. The `.` character
// will not match the new line character `\n`, which may be present in a
// binary key.
bytes row_key_regex_filter = 4;
// Matches all cells from a row with probability p, and matches no cells
// from the row with probability 1-p.
double row_sample_filter = 14;
// Matches only cells from columns whose families satisfy the given RE2
// regex. For technical reasons, the regex must not contain the `:`
// character, even if it is not being used as a literal.
// Note that, since column families cannot contain the new line character
// `\n`, it is sufficient to use `.` as a full wildcard when matching
// column family names.
string family_name_regex_filter = 5;
// Matches only cells from columns whose qualifiers satisfy the given RE2
// regex.
// Note that, since column qualifiers can contain arbitrary bytes, the `\C`
// escape sequence must be used if a true wildcard is desired. The `.`
// character will not match the new line character `\n`, which may be
// present in a binary qualifier.
bytes column_qualifier_regex_filter = 6;
// Matches only cells from columns within the given range.
ColumnRange column_range_filter = 7;
// Matches only cells with timestamps within the given range.
TimestampRange timestamp_range_filter = 8;
// Matches only cells with values that satisfy the given regular expression.
// Note that, since cell values can contain arbitrary bytes, the `\C` escape
// sequence must be used if a true wildcard is desired. The `.` character
// will not match the new line character `\n`, which may be present in a
// binary value.
bytes value_regex_filter = 9;
// Matches only cells with values that fall within the given range.
ValueRange value_range_filter = 15;
// Skips the first N cells of each row, matching all subsequent cells.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_offset_filter = 10;
// Matches only the first N cells of each row.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_limit_filter = 11;
// Matches only the most recent N cells within each column. For example,
// if N=2, this filter would match column `foo:bar` at timestamps 10 and 9,
// skip all earlier cells in `foo:bar`, and then begin matching again in
// column `foo:bar2`.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_column_limit_filter = 12;
// Replaces each cell's value with the empty string.
bool strip_value_transformer = 13;
// Applies the given label to all cells in the output row. This allows
// the client to determine which results were produced from which part of
// the filter.
//
// Values must be at most 15 characters in length, and match the RE2
// pattern `[a-z0-9\\-]+`
//
// Due to a technical limitation, it is not currently possible to apply
// multiple labels to a cell. As a result, a Chain may have no more than
// one sub-filter which contains an `apply_label_transformer`. It is okay for
// an Interleave to contain multiple `apply_label_transformers`, as they
// will be applied to separate copies of the input. This may be relaxed in
// the future.
string apply_label_transformer = 19;
}
}
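
A small sketch of the Chain composition described above, built with the generated Python classes (module path assumed): keep the two most recent cells per column in family "stats", then strip their values so only the cell structure is returned.

from gcloud.bigtable._generated_v2 import data_pb2

row_filter = data_pb2.RowFilter(chain=data_pb2.RowFilter.Chain(filters=[
    data_pb2.RowFilter(family_name_regex_filter='stats'),
    data_pb2.RowFilter(cells_per_column_limit_filter=2),
    data_pb2.RowFilter(strip_value_transformer=True),
]))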
// Specifies a particular change to be made to the contents of a row.
message Mutation {
// A Mutation which sets the value of the specified cell.
message SetCell {
// The name of the family into which new data should be written.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;
// The qualifier of the column into which new data should be written.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The timestamp of the cell into which new data should be written.
// Use -1 for current Bigtable server time.
// Otherwise, the client should set this value itself, noting that the
// default value is a timestamp of zero if the field is left unspecified.
// Values must match the granularity of the table (e.g. micros, millis).
int64 timestamp_micros = 3;
// The value to be written into the specified cell.
bytes value = 4;
}
// A Mutation which deletes cells from the specified column, optionally
// restricting the deletions to a given timestamp range.
message DeleteFromColumn {
// The name of the family from which cells should be deleted.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;
// The qualifier of the column from which cells should be deleted.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The range of timestamps within which cells should be deleted.
TimestampRange time_range = 3;
}
// A Mutation which deletes all cells from the specified column family.
message DeleteFromFamily {
// The name of the family from which cells should be deleted.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;
}
// A Mutation which deletes all cells from the containing row.
message DeleteFromRow {
}
// Which of the possible Mutation types to apply.
oneof mutation {
// Set a cell's value.
SetCell set_cell = 1;
// Deletes cells from a column.
DeleteFromColumn delete_from_column = 2;
// Deletes cells from a column family.
DeleteFromFamily delete_from_family = 3;
// Deletes cells from the entire row.
DeleteFromRow delete_from_row = 4;
}
}
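
As a sketch (module path and field values assumed), a SetCell that lets the server pick the timestamp, paired with a DeleteFromColumn restricted to older data, could look like:

from gcloud.bigtable._generated_v2 import data_pb2

set_cell = data_pb2.Mutation(set_cell=data_pb2.Mutation.SetCell(
    family_name='stats',
    column_qualifier=b'connected_cell',
    timestamp_micros=-1,  # -1 asks the server to use its current time
    value=b'1',
))
delete_old = data_pb2.Mutation(
    delete_from_column=data_pb2.Mutation.DeleteFromColumn(
        family_name='stats',
        column_qualifier=b'connected_cell',
        time_range=data_pb2.TimestampRange(
            end_timestamp_micros=1464810600000000),
    ))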
// Specifies an atomic read/modify/write operation on the latest value of the
// specified column.
message ReadModifyWriteRule {
// The name of the family to which the read/modify/write should be applied.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;
// The qualifier of the column to which the read/modify/write should be
// applied.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The rule used to determine the column's new latest value from its current
// latest value.
oneof rule {
// Rule specifying that `append_value` be appended to the existing value.
// If the targeted cell is unset, it will be treated as containing the
// empty string.
bytes append_value = 3;
// Rule specifying that `increment_amount` be added to the existing value.
// If the targeted cell is unset, it will be treated as containing a zero.
// Otherwise, the targeted cell must contain an 8-byte value (interpreted
// as a 64-bit big-endian signed integer), or the entire request will fail.
int64 increment_amount = 4;
}
}
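
A minimal sketch of the increment rule (module path assumed); as noted above, the targeted cell must either be unset or hold an 8-byte big-endian value:

from gcloud.bigtable._generated_v2 import data_pb2

rule = data_pb2.ReadModifyWriteRule(
    family_name='stats',
    column_qualifier=b'connected_cell',
    increment_amount=1,
)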

View file

@ -0,0 +1,113 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.v2;
import "google/api/annotations.proto";
import "google/bigtable/admin/v2/common.proto";
option java_multiple_files = true;
option java_outer_classname = "InstanceProto";
option java_package = "com.google.bigtable.admin.v2";
// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
// the resources that serve them.
// All tables in an instance are served from a single
// [Cluster][google.bigtable.admin.v2.Cluster].
message Instance {
// Possible states of an instance.
enum State {
// The state of the instance could not be determined.
STATE_NOT_KNOWN = 0;
// The instance has been successfully created and can serve requests
// to its tables.
READY = 1;
// The instance is currently being created, and may be destroyed
// if the creation process encounters an error.
CREATING = 2;
}
// @OutputOnly
// The unique name of the instance. Values are of the form
// projects/<project>/instances/[a-z][a-z0-9\\-]+[a-z0-9]
string name = 1;
// The descriptive name for this instance as it appears in UIs.
// Can be changed at any time, but should be kept globally unique
// to avoid confusion.
string display_name = 2;
//
// The current state of the instance.
State state = 3;
}
// A resizable group of nodes in a particular cloud location, capable
// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent
// [Instance][google.bigtable.admin.v2.Instance].
message Cluster {
// Possible states of a cluster.
enum State {
// The state of the cluster could not be determined.
STATE_NOT_KNOWN = 0;
// The cluster has been successfully created and is ready to serve requests.
READY = 1;
// The cluster is currently being created, and may be destroyed
// if the creation process encounters an error.
// A cluster may not be able to serve requests while being created.
CREATING = 2;
// The cluster is currently being resized, and may revert to its previous
// node count if the process encounters an error.
// A cluster is still capable of serving requests while being resized,
// but may exhibit performance as if its number of allocated nodes is
// between the starting and requested states.
RESIZING = 3;
// The cluster has no backing nodes. The data (tables) still
// exist, but no operations can be performed on the cluster.
DISABLED = 4;
}
// @OutputOnly
// The unique name of the cluster. Values are of the form
// projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*
string name = 1;
// @CreationOnly
// The location where this cluster's nodes and storage reside. For best
// performance, clients should be located as close as possible to this cluster.
// Currently only zones are supported, e.g. projects/*/locations/us-central1-b
string location = 2;
// @OutputOnly
// The current state of the cluster.
State state = 3;
// The number of nodes allocated to this cluster. More nodes enable higher
// throughput and more consistent performance.
int32 serve_nodes = 4;
// @CreationOnly
// The type of storage used by this cluster to serve its
// parent instance's tables, unless explicitly overridden.
StorageType default_storage_type = 5;
}
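
For illustration, the relationship between an Instance and its Cluster maps onto the generated Python messages roughly as follows (module names and resource paths are assumed; the server fills the @OutputOnly fields):

from gcloud.bigtable._generated_v2 import common_pb2, instance_pb2

instance = instance_pb2.Instance(display_name='Production traffic')
cluster = instance_pb2.Cluster(
    location='projects/my-project/locations/us-central1-b',
    serve_nodes=3,
    default_storage_type=common_pb2.SSD,
)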

View file

@ -0,0 +1,144 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.longrunning;
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "OperationsProto";
option java_package = "com.google.longrunning";
// Manages long-running operations with an API service.
//
// When an API method normally takes a long time to complete, it can be designed
// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
// interface to receive the real response asynchronously by polling the
// operation resource, or using `google.watcher.v1.Watcher` interface to watch
// the response, or pass the operation resource to another API (such as Google
// Cloud Pub/Sub API) to receive the response. Any API service that returns
// long-running operations should implement the `Operations` interface so
// developers can have a consistent client experience.
service Operations {
// Gets the latest state of a long-running operation. Clients may use this
// method to poll the operation result at intervals as recommended by the API
// service.
rpc GetOperation(GetOperationRequest) returns (Operation) {
option (google.api.http) = { get: "/v1/{name=operations/**}" };
}
// Lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
option (google.api.http) = { get: "/v1/{name=operations}" };
}
// Starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients may use
// [Operations.GetOperation] or other methods to check whether the
// cancellation succeeded or the operation completed despite cancellation.
rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
}
// Deletes a long-running operation. It indicates the client is no longer
// interested in the operation result. It does not cancel the operation.
rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=operations/**}" };
}
}
// This resource represents a long-running operation that is the result of a
// network API call.
message Operation {
// The name of the operation resource, which is only unique within the same
// service that originally returns it.
string name = 1;
// Some service-specific metadata associated with the operation. It typically
// contains progress information and common metadata such as create time.
// Some services may not provide such metadata. Any method that returns a
// long-running operation should document the metadata type, if any.
google.protobuf.Any metadata = 2;
// If the value is false, it means the operation is still in progress.
// If true, the operation is completed and the `result` is available.
bool done = 3;
oneof result {
// The error result of the operation in case of failure.
google.rpc.Status error = 4;
// The normal response of the operation in case of success. If the original
// method returns no data on success, such as `Delete`, the response will be
// `google.protobuf.Empty`. If the original method is standard
// `Get`/`Create`/`Update`, the response should be the resource. For other
// methods, the response should have the type `XxxResponse`, where `Xxx`
// is the original method name. For example, if the original method name
// is `TakeSnapshot()`, the inferred response type will be
// `TakeSnapshotResponse`.
google.protobuf.Any response = 5;
}
}
// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
message GetOperationRequest {
// The name of the operation resource.
string name = 1;
}
// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsRequest {
// The name of the operation collection.
string name = 4;
// The standard List filter.
string filter = 1;
// The standard List page size.
int32 page_size = 2;
// The standard List page token.
string page_token = 3;
}
// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsResponse {
// A list of operations that match the specified filter in the request.
repeated Operation operations = 1;
// The standard List next-page token.
string next_page_token = 2;
}
// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
message CancelOperationRequest {
// The name of the operation resource to be cancelled.
string name = 1;
}
// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
message DeleteOperationRequest {
// The name of the operation resource to be deleted.
string name = 1;
}
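
The polling pattern these comments describe reduces to checking `done` and then unpacking the `Any` payload; a hedged sketch (the caller supplies an Operation message and an empty instance of the documented result type):

def extract_result(operation, result_message):
    """Sketch: fill ``result_message`` from a completed Operation, else None."""
    if not operation.done:
        return None  # still running; call GetOperation again later
    if operation.HasField('error'):
        raise RuntimeError('operation failed: %s' % operation.error.message)
    operation.response.Unpack(result_message)
    return result_message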

View file

@ -0,0 +1,115 @@
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.v2;
import "google/api/annotations.proto";
import "google/protobuf/duration.proto";
option java_multiple_files = true;
option java_outer_classname = "TableProto";
option java_package = "com.google.bigtable.admin.v2";
// A collection of user data indexed by row, column, and timestamp.
// Each table is served using the resources of its parent cluster.
message Table {
// Possible timestamp granularities to use when keeping multiple versions
// of data in a table.
enum TimestampGranularity {
// The user did not specify a granularity. Should not be returned.
// When specified during table creation, MILLIS will be used.
TIMESTAMP_GRANULARITY_UNSPECIFIED = 0;
// The table keeps data versioned at a granularity of 1ms.
MILLIS = 1;
}
// Defines a view over a table's fields.
enum View {
// Uses the default view for each method as documented in its request.
VIEW_UNSPECIFIED = 0;
// Only populates `name`.
NAME_ONLY = 1;
// Only populates `name` and fields related to the table's schema.
SCHEMA_VIEW = 2;
// Populates all fields.
FULL = 4;
}
// The unique name of the table. Values are of the form
// projects/<project>/instances/<instance>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
// Views: NAME_ONLY, SCHEMA_VIEW, REPLICATION_VIEW, FULL
// @OutputOnly
string name = 1;
// The column families configured for this table, mapped by column family ID.
// Views: SCHEMA_VIEW, FULL
// @CreationOnly
map<string, ColumnFamily> column_families = 3;
// The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
// this table. Timestamps not matching the granularity will be rejected.
// If unspecified at creation time, the value will be set to MILLIS.
// Views: SCHEMA_VIEW, FULL
// @CreationOnly
TimestampGranularity granularity = 4;
}
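
A sketch of a Table as a client would submit it at creation time (module path assumed); `name` is output-only and therefore left unset:

from gcloud.bigtable._generated_v2 import table_pb2

table = table_pb2.Table(
    column_families={'stats': table_pb2.ColumnFamily()},
    granularity=table_pb2.Table.MILLIS,
)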
// A set of columns within a table which share a common configuration.
message ColumnFamily {
// Garbage collection rule specified as a protobuf.
// Must serialize to at most 500 bytes.
//
// NOTE: Garbage collection executes opportunistically in the background, and
// so it's possible for reads to return a cell even if it matches the active
// GC expression for its family.
GcRule gc_rule = 1;
}
// Rule for determining which cells to delete during garbage collection.
message GcRule {
// A GcRule which deletes cells matching all of the given rules.
message Intersection {
// Only delete cells which would be deleted by every element of `rules`.
repeated GcRule rules = 1;
}
// A GcRule which deletes cells matching any of the given rules.
message Union {
// Delete cells which would be deleted by any element of `rules`.
repeated GcRule rules = 1;
}
oneof rule {
// Delete all cells in a column except the most recent N.
int32 max_num_versions = 1;
// Delete cells in a column older than the given age.
// Values must be at least one millisecond, and will be truncated to
// microsecond granularity.
google.protobuf.Duration max_age = 2;
// Delete cells that would be deleted by every nested rule.
Intersection intersection = 3;
// Delete cells that would be deleted by any nested rule.
Union union = 4;
}
}
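
Rules compose recursively; for example, a Union that deletes any cell beyond the two most recent versions or older than seven days could be expressed as follows (a sketch, module path assumed):

from google.protobuf import duration_pb2
from gcloud.bigtable._generated_v2 import table_pb2

week = duration_pb2.Duration(seconds=7 * 24 * 60 * 60)
gc_rule = table_pb2.GcRule(union=table_pb2.GcRule.Union(rules=[
    table_pb2.GcRule(max_num_versions=2),
    table_pb2.GcRule(max_age=week),
]))
family = table_pb2.ColumnFamily(gc_rule=gc_rule)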

File diff suppressed because it is too large

View file

@ -0,0 +1,784 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/v2/bigtable_table_admin.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated_v2 import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/v2/bigtable_table_admin.proto',
package='google.bigtable.admin.v2',
syntax='proto3',
serialized_pb=_b('\n3google/bigtable/admin/v2/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a$google/bigtable/admin/v2/table.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"k\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod2\xb8\x07\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*B9\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor(
name='Split',
full_name='google.bigtable.admin.v2.CreateTableRequest.Split',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.bigtable.admin.v2.CreateTableRequest.Split.key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=359,
serialized_end=379,
)
_CREATETABLEREQUEST = _descriptor.Descriptor(
name='CreateTableRequest',
full_name='google.bigtable.admin.v2.CreateTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='google.bigtable.admin.v2.CreateTableRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table_id', full_name='google.bigtable.admin.v2.CreateTableRequest.table_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table', full_name='google.bigtable.admin.v2.CreateTableRequest.table', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='initial_splits', full_name='google.bigtable.admin.v2.CreateTableRequest.initial_splits', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CREATETABLEREQUEST_SPLIT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=179,
serialized_end=379,
)
_DROPROWRANGEREQUEST = _descriptor.Descriptor(
name='DropRowRangeRequest',
full_name='google.bigtable.admin.v2.DropRowRangeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.v2.DropRowRangeRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key_prefix', full_name='google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='delete_all_data_from_table', full_name='google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target',
index=0, containing_type=None, fields=[]),
],
serialized_start=381,
serialized_end=490,
)
_LISTTABLESREQUEST = _descriptor.Descriptor(
name='ListTablesRequest',
full_name='google.bigtable.admin.v2.ListTablesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='google.bigtable.admin.v2.ListTablesRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view', full_name='google.bigtable.admin.v2.ListTablesRequest.view', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=492,
serialized_end=599,
)
_LISTTABLESRESPONSE = _descriptor.Descriptor(
name='ListTablesResponse',
full_name='google.bigtable.admin.v2.ListTablesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tables', full_name='google.bigtable.admin.v2.ListTablesResponse.tables', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='google.bigtable.admin.v2.ListTablesResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=601,
serialized_end=695,
)
_GETTABLEREQUEST = _descriptor.Descriptor(
name='GetTableRequest',
full_name='google.bigtable.admin.v2.GetTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.v2.GetTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view', full_name='google.bigtable.admin.v2.GetTableRequest.view', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=697,
serialized_end=780,
)
_DELETETABLEREQUEST = _descriptor.Descriptor(
name='DeleteTableRequest',
full_name='google.bigtable.admin.v2.DeleteTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.v2.DeleteTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=782,
serialized_end=816,
)
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor(
name='Modification',
full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='create', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='update', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='drop', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod',
index=0, containing_type=None, fields=[]),
],
serialized_start=956,
serialized_end=1121,
)
_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor(
name='ModifyColumnFamiliesRequest',
full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='modifications', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=819,
serialized_end=1121,
)
_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST
_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE
_CREATETABLEREQUEST.fields_by_name['initial_splits'].message_type = _CREATETABLEREQUEST_SPLIT
_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append(
_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'])
_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target']
_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append(
_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'])
_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target']
_LISTTABLESREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW
_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE
_GETTABLEREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append(
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'])
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod']
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append(
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'])
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod']
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append(
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'])
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod']
_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name['modifications'].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION
DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST
DESCRIPTOR.message_types_by_name['DropRowRangeRequest'] = _DROPROWRANGEREQUEST
DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST
DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE
DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST
DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST
DESCRIPTOR.message_types_by_name['ModifyColumnFamiliesRequest'] = _MODIFYCOLUMNFAMILIESREQUEST
CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict(
Split = _reflection.GeneratedProtocolMessageType('Split', (_message.Message,), dict(
DESCRIPTOR = _CREATETABLEREQUEST_SPLIT,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split)
))
,
DESCRIPTOR = _CREATETABLEREQUEST,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest)
))
_sym_db.RegisterMessage(CreateTableRequest)
_sym_db.RegisterMessage(CreateTableRequest.Split)
DropRowRangeRequest = _reflection.GeneratedProtocolMessageType('DropRowRangeRequest', (_message.Message,), dict(
DESCRIPTOR = _DROPROWRANGEREQUEST,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest)
))
_sym_db.RegisterMessage(DropRowRangeRequest)
ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESREQUEST,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest)
))
_sym_db.RegisterMessage(ListTablesRequest)
ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESRESPONSE,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse)
))
_sym_db.RegisterMessage(ListTablesResponse)
GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict(
DESCRIPTOR = _GETTABLEREQUEST,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest)
))
_sym_db.RegisterMessage(GetTableRequest)
DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETETABLEREQUEST,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest)
))
_sym_db.RegisterMessage(DeleteTableRequest)
ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType('ModifyColumnFamiliesRequest', (_message.Message,), dict(
Modification = _reflection.GeneratedProtocolMessageType('Modification', (_message.Message,), dict(
DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification)
))
,
DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST,
__module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest)
))
_sym_db.RegisterMessage(ModifyColumnFamiliesRequest)
_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001'))
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BigtableTableAdminStub(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable',
request_serializer=CreateTableRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
)
self.ListTables = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ListTables',
request_serializer=ListTablesRequest.SerializeToString,
response_deserializer=ListTablesResponse.FromString,
)
self.GetTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GetTable',
request_serializer=GetTableRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
)
self.DeleteTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable',
request_serializer=DeleteTableRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ModifyColumnFamilies = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies',
request_serializer=ModifyColumnFamiliesRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
)
self.DropRowRange = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange',
request_serializer=DropRowRangeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
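# A minimal usage sketch (not generated code): given an already-configured
# grpc.Channel to the Bigtable admin endpoint, the stub above can fetch
# table metadata. The table name below is a placeholder.
def _example_get_table(channel, table_name):
    """Sketch: return the Table metadata for ``table_name`` over ``channel``."""
    stub = BigtableTableAdminStub(channel)
    return stub.GetTable(GetTableRequest(name=table_name), timeout=30)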
class BigtableTableAdminServicer(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def CreateTable(self, request, context):
"""Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTables(self, request, context):
"""Lists all tables served from a specified instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTable(self, request, context):
"""Gets metadata information about the specified table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteTable(self, request, context):
"""Permanently deletes a specified table and all of its data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyColumnFamilies(self, request, context):
"""Atomically performs a series of column family modifications
on the specified table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DropRowRange(self, request, context):
"""Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BigtableTableAdminServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateTable': grpc.unary_unary_rpc_method_handler(
servicer.CreateTable,
request_deserializer=CreateTableRequest.FromString,
response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString,
),
'ListTables': grpc.unary_unary_rpc_method_handler(
servicer.ListTables,
request_deserializer=ListTablesRequest.FromString,
response_serializer=ListTablesResponse.SerializeToString,
),
'GetTable': grpc.unary_unary_rpc_method_handler(
servicer.GetTable,
request_deserializer=GetTableRequest.FromString,
response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString,
),
'DeleteTable': grpc.unary_unary_rpc_method_handler(
servicer.DeleteTable,
request_deserializer=DeleteTableRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler(
servicer.ModifyColumnFamilies,
request_deserializer=ModifyColumnFamiliesRequest.FromString,
response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString,
),
'DropRowRange': grpc.unary_unary_rpc_method_handler(
servicer.DropRowRange,
request_deserializer=DropRowRangeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
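# --- Illustrative server wiring (sketch; not part of the generated output) ---
# `MyTableAdmin`, the executor size, and the port below are assumptions for
# the example; a real servicer would override every method it supports.
#
#   from concurrent import futures
#   import grpc
#
#   class MyTableAdmin(BigtableTableAdminServicer):
#       def GetTable(self, request, context):
#           return google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table(name=request.name)
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#   add_BigtableTableAdminServicer_to_server(MyTableAdmin(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()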
class BetaBigtableTableAdminServicer(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def CreateTable(self, request, context):
"""Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ListTables(self, request, context):
"""Lists all tables served from a specified instance.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def GetTable(self, request, context):
"""Gets metadata information about the specified table.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeleteTable(self, request, context):
"""Permanently deletes a specified table and all of its data.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ModifyColumnFamilies(self, request, context):
"""Atomically performs a series of column family modifications
on the specified table.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DropRowRange(self, request, context):
"""Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaBigtableTableAdminStub(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def CreateTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
"""
raise NotImplementedError()
CreateTable.future = None
def ListTables(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Lists all tables served from a specified instance.
"""
raise NotImplementedError()
ListTables.future = None
def GetTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Gets metadata information about the specified table.
"""
raise NotImplementedError()
GetTable.future = None
def DeleteTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Permanently deletes a specified table and all of its data.
"""
raise NotImplementedError()
DeleteTable.future = None
def ModifyColumnFamilies(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Atomically performs a series of column family modifications
on the specified table.
"""
raise NotImplementedError()
ModifyColumnFamilies.future = None
def DropRowRange(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
"""
raise NotImplementedError()
DropRowRange.future = None
def beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.FromString,
}
response_serializers = {
('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString,
}
method_implementations = {
('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable),
('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable),
('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): face_utilities.unary_unary_inline(servicer.DropRowRange),
('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable),
('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables),
('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): face_utilities.unary_unary_inline(servicer.ModifyColumnFamilies),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_BigtableTableAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.SerializeToString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.SerializeToString,
}
response_deserializers = {
('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.FromString,
('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
}
cardinalities = {
'CreateTable': cardinality.Cardinality.UNARY_UNARY,
'DeleteTable': cardinality.Cardinality.UNARY_UNARY,
'DropRowRange': cardinality.Cardinality.UNARY_UNARY,
'GetTable': cardinality.Cardinality.UNARY_UNARY,
'ListTables': cardinality.Cardinality.UNARY_UNARY,
'ModifyColumnFamilies': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableTableAdmin', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
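# --- Illustrative client usage (sketch; not part of the generated output) ---
# The endpoint, table name, and use of an insecure channel are assumptions
# for the example; real Cloud Bigtable calls need an authenticated channel.
#
#   import grpc
#   channel = grpc.insecure_channel('bigtableadmin.googleapis.com:443')
#   admin = BigtableTableAdminStub(channel)
#   request = GetTableRequest(
#       name='projects/my-project/instances/my-instance/tables/my-table')
#   table = admin.GetTable(request, timeout=10)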

View file

@@ -0,0 +1,67 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/v2/common.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/v2/common.proto',
package='google.bigtable.admin.v2',
syntax='proto3',
serialized_pb=_b('\n%google/bigtable/admin/v2/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42-\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STORAGETYPE = _descriptor.EnumDescriptor(
name='StorageType',
full_name='google.bigtable.admin.v2.StorageType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STORAGE_TYPE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SSD', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HDD', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=130,
serialized_end=191,
)
_sym_db.RegisterEnumDescriptor(_STORAGETYPE)
StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE)
STORAGE_TYPE_UNSPECIFIED = 0
SSD = 1
HDD = 2
DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001'))
# @@protoc_insertion_point(module_scope)
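# --- Illustrative usage (sketch; not part of the generated output) ---
# The EnumTypeWrapper above supports the usual name/value lookups:
#
#   assert StorageType.Value('SSD') == SSD == 1
#   assert StorageType.Name(HDD) == 'HDD'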

File diff suppressed because it is too large

View file

@@ -0,0 +1,222 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/v2/instance.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated_v2 import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/v2/instance.proto',
package='google.bigtable.admin.v2',
syntax='proto3',
serialized_pb=_b('\n\'google/bigtable/admin/v2/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a%google/bigtable/admin/v2/common.proto\"\x9e\x01\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType\"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04\x42/\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_INSTANCE_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='google.bigtable.admin.v2.Instance.State',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATE_NOT_KNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATING', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=244,
serialized_end=297,
)
_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE)
_CLUSTER_STATE = _descriptor.EnumDescriptor(
name='State',
full_name='google.bigtable.admin.v2.Cluster.State',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATE_NOT_KNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CREATING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESIZING', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DISABLED', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=489,
serialized_end=570,
)
_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE)
_INSTANCE = _descriptor.Descriptor(
name='Instance',
full_name='google.bigtable.admin.v2.Instance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.v2.Instance.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.bigtable.admin.v2.Instance.display_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='google.bigtable.admin.v2.Instance.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_INSTANCE_STATE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=297,
)
_CLUSTER = _descriptor.Descriptor(
name='Cluster',
full_name='google.bigtable.admin.v2.Cluster',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.v2.Cluster.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='location', full_name='google.bigtable.admin.v2.Cluster.location', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state', full_name='google.bigtable.admin.v2.Cluster.state', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serve_nodes', full_name='google.bigtable.admin.v2.Cluster.serve_nodes', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_storage_type', full_name='google.bigtable.admin.v2.Cluster.default_storage_type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLUSTER_STATE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=300,
serialized_end=570,
)
_INSTANCE.fields_by_name['state'].enum_type = _INSTANCE_STATE
_INSTANCE_STATE.containing_type = _INSTANCE
_CLUSTER.fields_by_name['state'].enum_type = _CLUSTER_STATE
_CLUSTER.fields_by_name['default_storage_type'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2._STORAGETYPE
_CLUSTER_STATE.containing_type = _CLUSTER
DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE
DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER
Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict(
DESCRIPTOR = _INSTANCE,
__module__ = 'google.bigtable.admin.v2.instance_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance)
))
_sym_db.RegisterMessage(Instance)
Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict(
DESCRIPTOR = _CLUSTER,
__module__ = 'google.bigtable.admin.v2.instance_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster)
))
_sym_db.RegisterMessage(Cluster)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001'))
# @@protoc_insertion_point(module_scope)
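# --- Illustrative usage (sketch with hypothetical resource names) ---
#
#   instance = Instance(name='projects/my-project/instances/my-instance',
#                       display_name='My instance')
#   instance.state = 1  # READY, per the State enum above
#   cluster = Cluster(name=instance.name + '/clusters/my-cluster',
#                     serve_nodes=3)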

View file

@@ -0,0 +1,264 @@
from google.longrunning.operations_pb2 import (
CancelOperationRequest,
DeleteOperationRequest,
GetOperationRequest,
ListOperationsRequest,
ListOperationsResponse,
Operation,
google_dot_protobuf_dot_empty__pb2,
)
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class OperationsStub(object):
"""Manages long-running operations with an API service.
When an API method normally takes long time to complete, it can be designed
to return [Operation][google.longrunning.Operation] to the client, and the client can use this
interface to receive the real response asynchronously by polling the
operation resource, or using `google.watcher.v1.Watcher` interface to watch
the response, or pass the operation resource to another API (such as Google
Cloud Pub/Sub API) to receive the response. Any API service that returns
long-running operations should implement the `Operations` interface so
developers can have a consistent client experience.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetOperation = channel.unary_unary(
'/google.longrunning.Operations/GetOperation',
request_serializer=GetOperationRequest.SerializeToString,
response_deserializer=Operation.FromString,
)
self.ListOperations = channel.unary_unary(
'/google.longrunning.Operations/ListOperations',
request_serializer=ListOperationsRequest.SerializeToString,
response_deserializer=ListOperationsResponse.FromString,
)
self.CancelOperation = channel.unary_unary(
'/google.longrunning.Operations/CancelOperation',
request_serializer=CancelOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeleteOperation = channel.unary_unary(
'/google.longrunning.Operations/DeleteOperation',
request_serializer=DeleteOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class OperationsServicer(object):
"""Manages long-running operations with an API service.
When an API method normally takes long time to complete, it can be designed
to return [Operation][google.longrunning.Operation] to the client, and the client can use this
interface to receive the real response asynchronously by polling the
operation resource, or using `google.watcher.v1.Watcher` interface to watch
the response, or pass the operation resource to another API (such as Google
Cloud Pub/Sub API) to receive the response. Any API service that returns
long-running operations should implement the `Operations` interface so
developers can have a consistent client experience.
"""
def GetOperation(self, request, context):
"""Gets the latest state of a long-running operation. Clients may use this
method to poll the operation result at intervals as recommended by the API
service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelOperation(self, request, context):
"""Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients may use
[Operations.GetOperation] or other methods to check whether the
cancellation succeeded or the operation completed despite cancellation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteOperation(self, request, context):
"""Deletes a long-running operation. It indicates the client is no longer
interested in the operation result. It does not cancel the operation.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OperationsServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetOperation': grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=GetOperationRequest.FromString,
response_serializer=Operation.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=ListOperationsRequest.FromString,
response_serializer=ListOperationsResponse.SerializeToString,
),
'CancelOperation': grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=CancelOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'DeleteOperation': grpc.unary_unary_rpc_method_handler(
servicer.DeleteOperation,
request_deserializer=DeleteOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.longrunning.Operations', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaOperationsServicer(object):
"""Manages long-running operations with an API service.
When an API method normally takes long time to complete, it can be designed
to return [Operation][google.longrunning.Operation] to the client, and the client can use this
interface to receive the real response asynchronously by polling the
operation resource, or using `google.watcher.v1.Watcher` interface to watch
the response, or pass the operation resource to another API (such as Google
Cloud Pub/Sub API) to receive the response. Any API service that returns
long-running operations should implement the `Operations` interface so
developers can have a consistent client experience.
"""
def GetOperation(self, request, context):
"""Gets the latest state of a long-running operation. Clients may use this
method to poll the operation result at intervals as recommended by the API
service.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ListOperations(self, request, context):
"""Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CancelOperation(self, request, context):
"""Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients may use
[Operations.GetOperation] or other methods to check whether the
cancellation succeeded or the operation completed despite cancellation.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def DeleteOperation(self, request, context):
"""Deletes a long-running operation. It indicates the client is no longer
interested in the operation result. It does not cancel the operation.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaOperationsStub(object):
"""Manages long-running operations with an API service.
When an API method normally takes long time to complete, it can be designed
to return [Operation][google.longrunning.Operation] to the client, and the client can use this
interface to receive the real response asynchronously by polling the
operation resource, or using `google.watcher.v1.Watcher` interface to watch
the response, or pass the operation resource to another API (such as Google
Cloud Pub/Sub API) to receive the response. Any API service that returns
long-running operations should implement the `Operations` interface so
developers can have a consistent client experience.
"""
def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Gets the latest state of a long-running operation. Clients may use this
method to poll the operation result at intervals as recommended by the API
service.
"""
raise NotImplementedError()
GetOperation.future = None
def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
"""
raise NotImplementedError()
ListOperations.future = None
def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients may use
[Operations.GetOperation] or other methods to check whether the
cancellation succeeded or the operation completed despite cancellation.
"""
raise NotImplementedError()
CancelOperation.future = None
def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Deletes a long-running operation. It indicates the client is no longer
interested in the operation result. It does not cancel the operation.
"""
raise NotImplementedError()
DeleteOperation.future = None
def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString,
('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString,
('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString,
('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString,
}
response_serializers = {
('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString,
('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString,
}
method_implementations = {
('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation),
('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation),
('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation),
('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString,
('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString,
('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString,
('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString,
}
response_deserializers = {
('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
('google.longrunning.Operations', 'GetOperation'): Operation.FromString,
('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString,
}
cardinalities = {
'CancelOperation': cardinality.Cardinality.UNARY_UNARY,
'DeleteOperation': cardinality.Cardinality.UNARY_UNARY,
'GetOperation': cardinality.Cardinality.UNARY_UNARY,
'ListOperations': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options)
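# --- Illustrative usage (sketch; endpoint and operation name are assumed) ---
# Polling a long-running operation through the GA-style stub defined above:
#
#   import grpc
#   channel = grpc.insecure_channel('bigtableadmin.googleapis.com:443')
#   operations = OperationsStub(channel)
#   op = operations.GetOperation(
#       GetOperationRequest(name='operations/sample-operation'), timeout=10)
#   if op.done:
#       result = op.response  # or op.error if the operation failed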

View file

@@ -0,0 +1,393 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/v2/table.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/v2/table.proto',
package='google.bigtable.admin.v2',
syntax='proto3',
serialized_pb=_b('\n$google/bigtable/admin/v2/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xa0\x03\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"F\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04ruleB,\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor(
name='TimestampGranularity',
full_name='google.bigtable.admin.v2.Table.TimestampGranularity',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TIMESTAMP_GRANULARITY_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MILLIS', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=400,
serialized_end=473,
)
_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY)
_TABLE_VIEW = _descriptor.EnumDescriptor(
name='View',
full_name='google.bigtable.admin.v2.Table.View',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='VIEW_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NAME_ONLY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SCHEMA_VIEW', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FULL', index=3, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=475,
serialized_end=545,
)
_sym_db.RegisterEnumDescriptor(_TABLE_VIEW)
_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor(
name='ColumnFamiliesEntry',
full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=305,
serialized_end=398,
)
_TABLE = _descriptor.Descriptor(
name='Table',
full_name='google.bigtable.admin.v2.Table',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.v2.Table.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=1,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=2,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TABLE_COLUMNFAMILIESENTRY, ],
enum_types=[
_TABLE_TIMESTAMPGRANULARITY,
_TABLE_VIEW,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=129,
serialized_end=545,
)
_COLUMNFAMILY = _descriptor.Descriptor(
name='ColumnFamily',
full_name='google.bigtable.admin.v2.ColumnFamily',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gc_rule', full_name='google.bigtable.admin.v2.ColumnFamily.gc_rule', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=547,
serialized_end=612,
)
_GCRULE_INTERSECTION = _descriptor.Descriptor(
name='Intersection',
full_name='google.bigtable.admin.v2.GcRule.Intersection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.admin.v2.GcRule.Intersection.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=827,
serialized_end=890,
)
_GCRULE_UNION = _descriptor.Descriptor(
name='Union',
full_name='google.bigtable.admin.v2.GcRule.Union',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.admin.v2.GcRule.Union.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=892,
serialized_end=948,
)
_GCRULE = _descriptor.Descriptor(
name='GcRule',
full_name='google.bigtable.admin.v2.GcRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_num_versions', full_name='google.bigtable.admin.v2.GcRule.max_num_versions', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_age', full_name='google.bigtable.admin.v2.GcRule.max_age', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='intersection', full_name='google.bigtable.admin.v2.GcRule.intersection', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='union', full_name='google.bigtable.admin.v2.GcRule.union', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='rule', full_name='google.bigtable.admin.v2.GcRule.rule',
index=0, containing_type=None, fields=[]),
],
serialized_start=615,
serialized_end=956,
)
_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY
_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE
_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY
_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY
_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE
_TABLE_VIEW.containing_type = _TABLE
_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE
_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE
_GCRULE_INTERSECTION.containing_type = _GCRULE
_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE
_GCRULE_UNION.containing_type = _GCRULE
_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION
_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['max_num_versions'])
_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['max_age'])
_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['intersection'])
_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['union'])
_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule']
DESCRIPTOR.message_types_by_name['Table'] = _TABLE
DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY
DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE
Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict(
ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict(
DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY,
__module__ = 'google.bigtable.admin.v2.table_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry)
))
,
DESCRIPTOR = _TABLE,
__module__ = 'google.bigtable.admin.v2.table_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table)
))
_sym_db.RegisterMessage(Table)
_sym_db.RegisterMessage(Table.ColumnFamiliesEntry)
ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict(
DESCRIPTOR = _COLUMNFAMILY,
__module__ = 'google.bigtable.admin.v2.table_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily)
))
_sym_db.RegisterMessage(ColumnFamily)
GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict(
Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict(
DESCRIPTOR = _GCRULE_INTERSECTION,
__module__ = 'google.bigtable.admin.v2.table_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection)
))
,
Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict(
DESCRIPTOR = _GCRULE_UNION,
__module__ = 'google.bigtable.admin.v2.table_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union)
))
,
DESCRIPTOR = _GCRULE,
__module__ = 'google.bigtable.admin.v2.table_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule)
))
_sym_db.RegisterMessage(GcRule)
_sym_db.RegisterMessage(GcRule.Intersection)
_sym_db.RegisterMessage(GcRule.Union)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001'))
_TABLE_COLUMNFAMILIESENTRY.has_options = True
_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
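# --- Illustrative usage (sketch with hypothetical values) ---
# GcRule stores exactly one variant in the 'rule' oneof declared above:
#
#   rule = GcRule(max_num_versions=3)
#   assert rule.WhichOneof('rule') == 'max_num_versions'
#   table = Table(name='projects/my-project/instances/my-instance/tables/t')
#   table.column_families['cf1'].gc_rule.CopyFrom(rule)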

View file

@@ -0,0 +1,57 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocks used to emulate gRPC generated objects."""
class _FakeStub(object):
"""Acts as a gPRC stub."""
def __init__(self, *results):
self.results = results
self.method_calls = []
self._entered = 0
self._exited = []
def __enter__(self):
self._entered += 1
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited.append((exc_type, exc_val, exc_tb))
return True
def __getattr__(self, name):
# We need not worry about attributes set in constructor
# since __getattribute__ will handle them.
return _MethodMock(name, self)
class _MethodMock(object):
"""Mock for API method attached to a gRPC stub.
In the beta implementation, these are of type.
:class:`grpc.framework.crust.implementations._UnaryUnaryMultiCallable`
"""
def __init__(self, name, factory):
self._name = name
self._factory = factory
def __call__(self, *args, **kwargs):
"""Sync method meant to mock a gRPC stub request."""
self._factory.method_calls.append((self._name, args, kwargs))
curr_result, self._factory.results = (self._factory.results[0],
self._factory.results[1:])
return curr_result
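# --- Illustrative test usage (sketch; method name and request are arbitrary) ---
# The fake stub replays its queued results in order and records every call:
#
#   stub = _FakeStub('first-response')
#   result = stub.SomeRpc('a-request', timeout=10)
#   assert result == 'first-response'
#   assert stub.method_calls == [('SomeRpc', ('a-request',), {'timeout': 10})]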

View file

@@ -0,0 +1,480 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parent client for calling the Google Cloud Bigtable API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`Client` owns a :class:`.Instance`
* a :class:`.Instance` owns a :class:`Table <gcloud.bigtable.table.Table>`
* a :class:`Table <gcloud.bigtable.table.Table>` owns a
:class:`ColumnFamily <.column_family.ColumnFamily>`
* a :class:`Table <gcloud.bigtable.table.Table>` owns a :class:`Row <.row.Row>`
(and all the cells in the row)
"""
from pkg_resources import get_distribution
from grpc.beta import implementations
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as instance_admin_v2_pb2)
# V2 table admin service
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
# V2 data service
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
operations_grpc_pb2 as operations_grpc_v2_pb2)
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
from gcloud.bigtable.instance import Instance
from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID
from gcloud.client import _ClientFactoryMixin
from gcloud.client import _ClientProjectMixin
from gcloud.credentials import get_credentials
TABLE_STUB_FACTORY_V2 = (
table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub)
TABLE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
"""Table Admin API request host."""
TABLE_ADMIN_PORT_V2 = 443
"""Table Admin API request port."""
INSTANCE_STUB_FACTORY_V2 = (
instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub)
INSTANCE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
"""Cluster Admin API request host."""
INSTANCE_ADMIN_PORT_V2 = 443
"""Cluster Admin API request port."""
DATA_STUB_FACTORY_V2 = data_v2_pb2.beta_create_Bigtable_stub
DATA_API_HOST_V2 = 'bigtable.googleapis.com'
"""Data API request host."""
DATA_API_PORT_V2 = 443
"""Data API request port."""
OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v2_pb2.beta_create_Operations_stub
OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2
OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2
ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin'
"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data'
"""Scope for reading and writing table data."""
READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly'
"""Scope for reading table data."""
DEFAULT_TIMEOUT_SECONDS = 10
"""The default timeout to use for API requests."""
DEFAULT_USER_AGENT = 'gcloud-python/{0}'.format(
get_distribution('gcloud').version)
"""The default user agent for API requests."""
class Client(_ClientFactoryMixin, _ClientProjectMixin):
"""Client for interacting with Google Cloud Bigtable API.
.. note::
Since the Cloud Bigtable API requires the gRPC transport, no
``http`` argument is accepted by this class.
:type project: :class:`str` or :func:`unicode <unicode>`
:param project: (Optional) The ID of the project which owns the
instances, tables and data. If not provided, will
attempt to determine from the environment.
:type credentials:
:class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>` or
:data:`NoneType <types.NoneType>`
:param credentials: (Optional) The OAuth2 Credentials to use for this
client. If not provided, defaults to the Google
Application Default Credentials.
:type read_only: bool
:param read_only: (Optional) Boolean indicating if the data scope should be
for reading only (or for writing as well). Defaults to
:data:`False`.
:type admin: bool
:param admin: (Optional) Boolean indicating if the client will be used to
interact with the Instance Admin or Table Admin APIs. This
requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`.
:type user_agent: str
:param user_agent: (Optional) The user agent to be used with API requests.
Defaults to :const:`DEFAULT_USER_AGENT`.
:type timeout_seconds: int
:param timeout_seconds: Number of seconds for request time-out. If not
passed, defaults to
:const:`DEFAULT_TIMEOUT_SECONDS`.
:raises: :class:`ValueError <exceptions.ValueError>` if both ``read_only``
and ``admin`` are :data:`True`
"""
def __init__(self, project=None, credentials=None,
read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT,
timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
_ClientProjectMixin.__init__(self, project=project)
if credentials is None:
credentials = get_credentials()
if read_only and admin:
raise ValueError('A read-only client cannot also perform '
'administrative actions.')
scopes = []
if read_only:
scopes.append(READ_ONLY_SCOPE)
else:
scopes.append(DATA_SCOPE)
if admin:
scopes.append(ADMIN_SCOPE)
self._admin = bool(admin)
try:
credentials = credentials.create_scoped(scopes)
except AttributeError:
pass
self._credentials = credentials
self.user_agent = user_agent
self.timeout_seconds = timeout_seconds
# These will be set in start().
self._data_stub_internal = None
self._instance_stub_internal = None
self._operations_stub_internal = None
self._table_stub_internal = None
def copy(self):
"""Make a copy of this client.
Copies the local data stored as simple types but does not copy the
current state of any open connections with the Cloud Bigtable API.
:rtype: :class:`.Client`
:returns: A copy of the current client.
"""
credentials = self._credentials
copied_creds = credentials.create_scoped(credentials.scopes)
return self.__class__(
self.project,
copied_creds,
READ_ONLY_SCOPE in copied_creds.scopes,
self._admin,
self.user_agent,
self.timeout_seconds,
)
@property
def credentials(self):
"""Getter for client's credentials.
:rtype:
:class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>`
:returns: The credentials stored on the client.
"""
return self._credentials
@property
def project_name(self):
"""Project name to be used with Instance Admin API.
.. note::
This property will not change if ``project`` does not, but the
return value is not cached.
The project name is of the form
``"projects/{project}"``
:rtype: str
:returns: The project name to be used with the Cloud Bigtable Admin
API RPC service.
"""
return 'projects/' + self.project
@property
def _data_stub(self):
"""Getter for the gRPC stub used for the Data API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client has not been :meth:`start`-ed.
"""
if self._data_stub_internal is None:
raise ValueError('Client has not been started.')
return self._data_stub_internal
@property
def _instance_stub(self):
"""Getter for the gRPC stub used for the Instance Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed.
"""
if not self._admin:
raise ValueError('Client is not an admin client.')
if self._instance_stub_internal is None:
raise ValueError('Client has not been started.')
return self._instance_stub_internal
@property
def _operations_stub(self):
"""Getter for the gRPC stub used for the Operations API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed.
"""
if not self._admin:
raise ValueError('Client is not an admin client.')
if self._operations_stub_internal is None:
raise ValueError('Client has not been started.')
return self._operations_stub_internal
@property
def _table_stub(self):
"""Getter for the gRPC stub used for the Table Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
:raises: :class:`ValueError <exceptions.ValueError>` if the current
client is not an admin client or if it has not been
:meth:`start`-ed.
"""
if not self._admin:
raise ValueError('Client is not an admin client.')
if self._table_stub_internal is None:
raise ValueError('Client has not been started.')
return self._table_stub_internal
def _make_data_stub(self):
"""Creates gRPC stub to make requests to the Data API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, DATA_STUB_FACTORY_V2,
DATA_API_HOST_V2, DATA_API_PORT_V2)
def _make_instance_stub(self):
"""Creates gRPC stub to make requests to the Instance Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, INSTANCE_STUB_FACTORY_V2,
INSTANCE_ADMIN_HOST_V2, INSTANCE_ADMIN_PORT_V2)
def _make_operations_stub(self):
"""Creates gRPC stub to make requests to the Operations API.
These are for long-running operations of the Instance Admin API,
hence the matching host and port.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, OPERATIONS_STUB_FACTORY_V2,
OPERATIONS_API_HOST_V2, OPERATIONS_API_PORT_V2)
def _make_table_stub(self):
"""Creates gRPC stub to make requests to the Table Admin API.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: A gRPC stub object.
"""
return _make_stub(self, TABLE_STUB_FACTORY_V2,
TABLE_ADMIN_HOST_V2, TABLE_ADMIN_PORT_V2)
def is_started(self):
"""Check if the client has been started.
:rtype: bool
:returns: Boolean indicating if the client has been started.
"""
return self._data_stub_internal is not None
def start(self):
"""Prepare the client to make requests.
Activates gRPC contexts for making requests to the Bigtable
Service(s).
"""
if self.is_started():
return
# NOTE: We __enter__ the stubs more-or-less permanently. This is
# because only after entering the context managers is the
# connection created. We don't want to immediately close
# those connections since the client will make many
# requests with it over HTTP/2.
self._data_stub_internal = self._make_data_stub()
self._data_stub_internal.__enter__()
if self._admin:
self._instance_stub_internal = self._make_instance_stub()
self._operations_stub_internal = self._make_operations_stub()
self._table_stub_internal = self._make_table_stub()
self._instance_stub_internal.__enter__()
self._operations_stub_internal.__enter__()
self._table_stub_internal.__enter__()
def __enter__(self):
"""Starts the client as a context manager."""
self.start()
return self
def stop(self):
"""Closes all the open gRPC clients."""
if not self.is_started():
return
# When exit-ing, we pass None as the exception type, value and
# traceback to __exit__.
self._data_stub_internal.__exit__(None, None, None)
if self._admin:
self._instance_stub_internal.__exit__(None, None, None)
self._operations_stub_internal.__exit__(None, None, None)
self._table_stub_internal.__exit__(None, None, None)
self._data_stub_internal = None
self._instance_stub_internal = None
self._operations_stub_internal = None
self._table_stub_internal = None
def __exit__(self, exc_type, exc_val, exc_tb):
"""Stops the client as a context manager."""
self.stop()
def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID,
display_name=None, serve_nodes=DEFAULT_SERVE_NODES):
"""Factory to create a instance associated with this client.
:type instance_id: str
:param instance_id: The ID of the instance.
:type location: str
:param location: location name, in form
``projects/<project>/locations/<location>``; used to
set up the instance's cluster.
:type display_name: str
:param display_name: (Optional) The display name for the instance in
the Cloud Console UI. (Must be between 4 and 30
characters.) If this value is not set in the
constructor, will fall back to the instance ID.
:type serve_nodes: int
:param serve_nodes: (Optional) The number of nodes in the instance's
cluster; used to set up the instance's cluster.
:rtype: :class:`.Instance`
:returns: an instance owned by this client.
"""
return Instance(instance_id, self, location,
display_name=display_name, serve_nodes=serve_nodes)
def list_instances(self):
"""List instances owned by the project.
:rtype: tuple
:returns: A pair of results, the first is a list of
:class:`.Instance` objects returned and the second is a
list of strings (the failed locations in the request).
"""
request_pb = instance_admin_v2_pb2.ListInstancesRequest(
parent=self.project_name)
response = self._instance_stub.ListInstances(
request_pb, self.timeout_seconds)
instances = [Instance.from_pb(instance_pb, self)
for instance_pb in response.instances]
return instances, response.failed_locations
class _MetadataPlugin(object):
"""Callable class to transform metadata for gRPC requests.
:type client: :class:`.client.Client`
:param client: The client that owns the instance.
Provides authorization and user agent.
"""
def __init__(self, client):
self._credentials = client.credentials
self._user_agent = client.user_agent
def __call__(self, unused_context, callback):
"""Adds authorization header to request metadata."""
access_token = self._credentials.get_access_token().access_token
headers = [
('Authorization', 'Bearer ' + access_token),
('User-agent', self._user_agent),
]
callback(headers, None)
def _make_stub(client, stub_factory, host, port):
"""Makes a stub for an RPC service.
Uses / depends on the beta implementation of gRPC.
:type client: :class:`.client.Client`
:param client: The client that owns the instance.
Provides authorization and user agent.
:type stub_factory: callable
:param stub_factory: A factory which will create a gRPC stub for
a given service.
:type host: str
:param host: The host for the service.
:type port: int
:param port: The port for the service.
:rtype: :class:`grpc.beta._stub._AutoIntermediary`
:returns: The stub object used to make gRPC requests to a given API.
"""
# Leaving the first argument to ssl_channel_credentials() as None
# loads root certificates from `grpc/_adapter/credentials/roots.pem`.
transport_creds = implementations.ssl_channel_credentials(None, None, None)
custom_metadata_plugin = _MetadataPlugin(client)
auth_creds = implementations.metadata_call_credentials(
custom_metadata_plugin, name='google_creds')
channel_creds = implementations.composite_channel_credentials(
transport_creds, auth_creds)
channel = implementations.secure_channel(host, port, channel_creds)
return stub_factory(channel)
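# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). It assumes an
# existing Google Cloud project named 'my-project' and Application Default
# Credentials available in the environment; both names are placeholders.
from gcloud.bigtable.client import Client

client = Client(project='my-project', admin=True)
client.start()  # Opens the gRPC channels; required before any API call.
try:
    instances, failed_locations = client.list_instances()
finally:
    client.stop()  # Closes the underlying gRPC connections.

# Because __enter__() calls start() and __exit__() calls stop(), the same
# lifecycle can also be written with a context manager:
with Client(project='my-project', admin=True) as client:
    instances, _ = client.list_instances()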

View file

@ -0,0 +1,384 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Cluster."""
import re
from google.longrunning import operations_pb2
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
_CLUSTER_NAME_RE = re.compile(r'^projects/(?P<project>[^/]+)/'
r'instances/(?P<instance>[^/]+)/clusters/'
r'(?P<cluster_id>[a-z][-a-z0-9]*)$')
_OPERATION_NAME_RE = re.compile(r'^operations/'
r'projects/([^/]+)/'
r'instances/([^/]+)/'
r'clusters/([a-z][-a-z0-9]*)/'
r'operations/(?P<operation_id>\d+)$')
_TYPE_URL_MAP = {
}
DEFAULT_SERVE_NODES = 3
"""Default number of nodes to use when creating a cluster."""
def _prepare_create_request(cluster):
"""Creates a protobuf request for a CreateCluster request.
:type cluster: :class:`Cluster`
:param cluster: The cluster to be created.
:rtype: :class:`.messages_v2_pb2.CreateClusterRequest`
:returns: The CreateCluster request object containing the cluster info.
"""
return messages_v2_pb2.CreateClusterRequest(
parent=cluster._instance.name,
cluster_id=cluster.cluster_id,
cluster=data_v2_pb2.Cluster(
serve_nodes=cluster.serve_nodes,
),
)
def _parse_pb_any_to_native(any_val, expected_type=None):
"""Convert a serialized "google.protobuf.Any" value to actual type.
:type any_val: :class:`google.protobuf.any_pb2.Any`
:param any_val: A serialized protobuf value container.
:type expected_type: str
:param expected_type: (Optional) The type URL we expect ``any_val``
to have.
:rtype: object
:returns: The de-serialized object.
:raises: :class:`ValueError <exceptions.ValueError>` if the
``expected_type`` does not match the ``type_url`` on the input.
"""
if expected_type is not None and expected_type != any_val.type_url:
raise ValueError('Expected type: %s, Received: %s' % (
expected_type, any_val.type_url))
container_class = _TYPE_URL_MAP[any_val.type_url]
return container_class.FromString(any_val.value)
def _process_operation(operation_pb):
"""Processes a create protobuf response.
:type operation_pb: :class:`google.longrunning.operations_pb2.Operation`
:param operation_pb: The long-running operation response from a
Create/Update/Undelete cluster request.
:rtype: int
:returns: Integer ID of the operation (``operation_id``).
:raises: :class:`ValueError <exceptions.ValueError>` if the operation name
doesn't match the :data:`_OPERATION_NAME_RE` regex.
"""
match = _OPERATION_NAME_RE.match(operation_pb.name)
if match is None:
raise ValueError('Operation name was not in the expected '
'format after a cluster modification.',
operation_pb.name)
operation_id = int(match.group('operation_id'))
return operation_id
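# Illustrative example (placeholder project/instance/cluster IDs) of the
# operation-name parsing performed above:
_SAMPLE_NAME = ('operations/projects/my-project/instances/my-instance/'
                'clusters/my-cluster/operations/1234')
assert _process_operation(
    operations_pb2.Operation(name=_SAMPLE_NAME)) == 1234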
class Operation(object):
"""Representation of a Google API Long-Running Operation.
In particular, these will be the result of operations on
clusters using the Cloud Bigtable API.
:type op_type: str
:param op_type: The type of operation being performed. Expect
``create``, ``update`` or ``undelete``.
:type op_id: int
:param op_id: The ID of the operation.
:type cluster: :class:`Cluster`
:param cluster: The cluster that created the operation.
"""
def __init__(self, op_type, op_id, cluster=None):
self.op_type = op_type
self.op_id = op_id
self._cluster = cluster
self._complete = False
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.op_type == self.op_type and
other.op_id == self.op_id and
other._cluster == self._cluster and
other._complete == self._complete)
def __ne__(self, other):
return not self.__eq__(other)
def finished(self):
"""Check if the operation has finished.
:rtype: bool
:returns: A boolean indicating if the current operation has completed.
:raises: :class:`ValueError <exceptions.ValueError>` if the operation
has already completed.
"""
if self._complete:
raise ValueError('The operation has completed.')
operation_name = ('operations/' + self._cluster.name +
'/operations/%d' % (self.op_id,))
request_pb = operations_pb2.GetOperationRequest(name=operation_name)
# We expect a `google.longrunning.operations_pb2.Operation`.
client = self._cluster._instance._client
operation_pb = client._operations_stub.GetOperation(
request_pb, client.timeout_seconds)
if operation_pb.done:
self._complete = True
return True
else:
return False
class Cluster(object):
"""Representation of a Google Cloud Bigtable Cluster.
We can use a :class:`Cluster` to:
* :meth:`reload` itself
* :meth:`create` itself
* :meth:`update` itself
* :meth:`delete` itself
* :meth:`undelete` itself
.. note::
For now, we leave out the ``default_storage_type`` (an enum)
which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`.
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type instance: :class:`.instance.Instance`
:param instance: The instance where the cluster resides.
:type serve_nodes: int
:param serve_nodes: (Optional) The number of nodes in the cluster.
Defaults to :data:`DEFAULT_SERVE_NODES`.
"""
def __init__(self, cluster_id, instance,
serve_nodes=DEFAULT_SERVE_NODES):
self.cluster_id = cluster_id
self._instance = instance
self.serve_nodes = serve_nodes
self.location = None
def _update_from_pb(self, cluster_pb):
"""Refresh self from the server-provided protobuf.
Helper for :meth:`from_pb` and :meth:`reload`.
"""
if not cluster_pb.serve_nodes: # Simple field (int32)
raise ValueError('Cluster protobuf does not contain serve_nodes')
self.serve_nodes = cluster_pb.serve_nodes
self.location = cluster_pb.location
@classmethod
def from_pb(cls, cluster_pb, instance):
"""Creates a cluster instance from a protobuf.
:type cluster_pb: :class:`instance_pb2.Cluster`
:param cluster_pb: A cluster protobuf object.
:type instance: :class:`.instance.Instance`
:param instance: The instance that owns the cluster.
:rtype: :class:`Cluster`
:returns: The cluster parsed from the protobuf response.
:raises:
:class:`ValueError <exceptions.ValueError>` if the cluster
name does not match
``projects/{project}/instances/{instance}/clusters/{cluster_id}``
or if the parsed project ID does not match the project ID
on the client.
"""
match = _CLUSTER_NAME_RE.match(cluster_pb.name)
if match is None:
raise ValueError('Cluster protobuf name was not in the '
'expected format.', cluster_pb.name)
if match.group('project') != instance._client.project:
raise ValueError('Project ID on cluster does not match the '
'project ID on the client')
if match.group('instance') != instance.instance_id:
raise ValueError('Instance ID on cluster does not match the '
'instance ID on the client')
result = cls(match.group('cluster_id'), instance)
result._update_from_pb(cluster_pb)
return result
def copy(self):
"""Make a copy of this cluster.
Copies the local data stored as simple types and copies the client
attached to this instance.
:rtype: :class:`.Cluster`
:returns: A copy of the current cluster.
"""
new_instance = self._instance.copy()
return self.__class__(self.cluster_id, new_instance,
serve_nodes=self.serve_nodes)
@property
def name(self):
"""Cluster name used in requests.
.. note::
This property will not change if ``_instance`` and ``cluster_id``
do not, but the return value is not cached.
The cluster name is of the form
``"projects/{project}/instances/{instance}/clusters/{cluster_id}"``
:rtype: str
:returns: The cluster name.
"""
return self._instance.name + '/clusters/' + self.cluster_id
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# NOTE: This does not compare the configuration values, such as
# the serve_nodes. Instead, it only compares
# identifying values: instance, cluster ID and client. This is
# intentional, since the same cluster can be in different states
# if not synchronized. Clusters with similar instance/cluster
# settings but different clients can't be used in the same way.
return (other.cluster_id == self.cluster_id and
other._instance == self._instance)
def __ne__(self, other):
return not self.__eq__(other)
def reload(self):
"""Reload the metadata for this cluster."""
request_pb = messages_v2_pb2.GetClusterRequest(name=self.name)
# We expect a `._generated_v2.instance_pb2.Cluster`.
cluster_pb = self._instance._client._instance_stub.GetCluster(
request_pb, self._instance._client.timeout_seconds)
# NOTE: _update_from_pb does not check that the project, instance and
# cluster ID on the response match the request.
self._update_from_pb(cluster_pb)
def create(self):
"""Create this cluster.
.. note::
Uses the ``project``, ``instance`` and ``cluster_id`` on the
current :class:`Cluster` in addition to the ``serve_nodes``.
To change them before creating, reset the values via
.. code:: python
cluster.serve_nodes = 8
cluster.cluster_id = 'i-changed-my-mind'
before calling :meth:`create`.
:rtype: :class:`Operation`
:returns: The long-running operation corresponding to the
create operation.
"""
request_pb = _prepare_create_request(self)
# We expect a `google.longrunning.operations_pb2.Operation`.
operation_pb = self._instance._client._instance_stub.CreateCluster(
request_pb, self._instance._client.timeout_seconds)
op_id = _process_operation(operation_pb)
return Operation('create', op_id, cluster=self)
def update(self):
"""Update this cluster.
.. note::
Updates the ``serve_nodes``. If you'd like to
change them before updating, reset the values via
.. code:: python
cluster.serve_nodes = 8
before calling :meth:`update`.
:rtype: :class:`Operation`
:returns: The long-running operation corresponding to the
update operation.
"""
request_pb = data_v2_pb2.Cluster(
name=self.name,
serve_nodes=self.serve_nodes,
)
# We expect a `google.longrunning.operations_pb2.Operation`.
operation_pb = self._instance._client._instance_stub.UpdateCluster(
request_pb, self._instance._client.timeout_seconds)
op_id = _process_operation(operation_pb)
return Operation('update', op_id, cluster=self)
def delete(self):
"""Delete this cluster.
Marks a cluster and all of its tables for permanent deletion in 7 days.
Immediately upon completion of the request:
* Billing will cease for all of the cluster's reserved resources.
* The cluster's ``delete_time`` field will be set 7 days in the future.
Soon afterward:
* All tables within the cluster will become unavailable.
Prior to the cluster's ``delete_time``:
* The cluster can be recovered with a call to ``UndeleteCluster``.
* All other attempts to modify or delete the cluster will be rejected.
At the cluster's ``delete_time``:
* The cluster and **all of its tables** will immediately and
irrevocably disappear from the API, and their data will be
permanently deleted.
"""
request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name)
# We expect a `google.protobuf.empty_pb2.Empty`
self._instance._client._instance_stub.DeleteCluster(
request_pb, self._instance._client.timeout_seconds)
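# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above). The project, instance
# and cluster IDs below are placeholders; the import path for Cluster assumes
# this module lives at gcloud/bigtable/cluster.py.
import time

from gcloud.bigtable.client import Client
from gcloud.bigtable.cluster import Cluster

client = Client(project='my-project', admin=True)
client.start()
instance = client.instance('my-instance')
cluster = Cluster('my-cluster', instance, serve_nodes=3)
operation = cluster.create()
# Each call to finished() issues a GetOperation RPC against the Operations
# API until the long-running create completes.
while not operation.finished():
    time.sleep(5)
client.stop()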

View file

@ -0,0 +1,339 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Column Family."""
import datetime
from google.protobuf import duration_pb2
from gcloud._helpers import _total_seconds
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
def _timedelta_to_duration_pb(timedelta_val):
"""Convert a Python timedelta object to a duration protobuf.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has a granularity of nanoseconds.
:type timedelta_val: :class:`datetime.timedelta`
:param timedelta_val: A timedelta object.
:rtype: :class:`google.protobuf.duration_pb2.Duration`
:returns: A duration object equivalent to the time delta.
"""
seconds_decimal = _total_seconds(timedelta_val)
# Truncate the parts other than the integer.
seconds = int(seconds_decimal)
if seconds_decimal < 0:
signed_micros = timedelta_val.microseconds - 10**6
else:
signed_micros = timedelta_val.microseconds
# Convert microseconds to nanoseconds.
nanos = 1000 * signed_micros
return duration_pb2.Duration(seconds=seconds, nanos=nanos)
def _duration_pb_to_timedelta(duration_pb):
"""Convert a duration protobuf to a Python timedelta object.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has a granularity of nanoseconds.
:type duration_pb: :class:`google.protobuf.duration_pb2.Duration`
:param duration_pb: A protobuf duration object.
:rtype: :class:`datetime.timedelta`
:returns: The converted timedelta object.
"""
return datetime.timedelta(
seconds=duration_pb.seconds,
microseconds=(duration_pb.nanos / 1000.0),
)
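# Illustrative round-trip of the two duration helpers above (microsecond
# precision is preserved; sub-microsecond protobuf values would be lost):
_delta = datetime.timedelta(seconds=1, microseconds=500)
assert _timedelta_to_duration_pb(_delta).nanos == 500000
assert _duration_pb_to_timedelta(_timedelta_to_duration_pb(_delta)) == _delta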
class GarbageCollectionRule(object):
"""Garbage collection rule for column families within a table.
Cells in the column family (within a table) fitting the rule will be
deleted during garbage collection.
.. note::
This class is a do-nothing base class for all GC rules.
.. note::
A string ``gc_expression`` can also be used with API requests, but
that value would be superseded by a ``gc_rule``. As a result, we
don't support that feature and instead support only the native classes.
"""
def __ne__(self, other):
return not self.__eq__(other)
class MaxVersionsGCRule(GarbageCollectionRule):
"""Garbage collection limiting the number of versions of a cell.
:type max_num_versions: int
:param max_num_versions: The maximum number of versions
"""
def __init__(self, max_num_versions):
self.max_num_versions = max_num_versions
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.max_num_versions == self.max_num_versions
def to_pb(self):
"""Converts the garbage collection rule to a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions)
class MaxAgeGCRule(GarbageCollectionRule):
"""Garbage collection limiting the age of a cell.
:type max_age: :class:`datetime.timedelta`
:param max_age: The maximum age allowed for a cell in the table.
"""
def __init__(self, max_age):
self.max_age = max_age
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.max_age == self.max_age
def to_pb(self):
"""Converts the garbage collection rule to a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
max_age = _timedelta_to_duration_pb(self.max_age)
return table_v2_pb2.GcRule(max_age=max_age)
class GCRuleUnion(GarbageCollectionRule):
"""Union of garbage collection rules.
:type rules: list
:param rules: List of :class:`GarbageCollectionRule`.
"""
def __init__(self, rules):
self.rules = rules
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.rules == self.rules
def to_pb(self):
"""Converts the union into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
union = table_v2_pb2.GcRule.Union(
rules=[rule.to_pb() for rule in self.rules])
return table_v2_pb2.GcRule(union=union)
class GCRuleIntersection(GarbageCollectionRule):
"""Intersection of garbage collection rules.
:type rules: list
:param rules: List of :class:`GarbageCollectionRule`.
"""
def __init__(self, rules):
self.rules = rules
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.rules == self.rules
def to_pb(self):
"""Converts the intersection into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
intersection = table_v2_pb2.GcRule.Intersection(
rules=[rule.to_pb() for rule in self.rules])
return table_v2_pb2.GcRule(intersection=intersection)
class ColumnFamily(object):
"""Representation of a Google Cloud Bigtable Column Family.
We can use a :class:`ColumnFamily` to:
* :meth:`create` itself
* :meth:`update` itself
* :meth:`delete` itself
:type column_family_id: str
:param column_family_id: The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the column family.
:type gc_rule: :class:`GarbageCollectionRule`
:param gc_rule: (Optional) The garbage collection settings for this
column family.
"""
def __init__(self, column_family_id, table, gc_rule=None):
self.column_family_id = column_family_id
self._table = table
self.gc_rule = gc_rule
@property
def name(self):
"""Column family name used in requests.
.. note::
This property will not change if ``column_family_id`` does not, but
the return value is not cached.
The column family name is of the form
``"projects/../zones/../clusters/../tables/../columnFamilies/.."``
:rtype: str
:returns: The column family name.
"""
return self._table.name + '/columnFamilies/' + self.column_family_id
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.column_family_id == self.column_family_id and
other._table == self._table and
other.gc_rule == self.gc_rule)
def __ne__(self, other):
return not self.__eq__(other)
def create(self):
"""Create this column family."""
if self.gc_rule is None:
column_family = table_v2_pb2.ColumnFamily()
else:
column_family = table_v2_pb2.ColumnFamily(
gc_rule=self.gc_rule.to_pb())
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=self._table.name)
request_pb.modifications.add(
id=self.column_family_id,
create=column_family,
)
client = self._table._instance._client
# We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only
# data it contains are the GC rule and the column family ID already
# stored on this instance.
client._table_stub.ModifyColumnFamilies(request_pb,
client.timeout_seconds)
def update(self):
"""Update this column family.
.. note::
Only the GC rule can be updated. By changing the column family ID,
you will simply be referring to a different column family.
"""
if self.gc_rule is None:
column_family = table_v2_pb2.ColumnFamily()
else:
column_family = table_v2_pb2.ColumnFamily(
gc_rule=self.gc_rule.to_pb())
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=self._table.name)
request_pb.modifications.add(
id=self.column_family_id,
update=column_family)
client = self._table._instance._client
# We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only
# data it contains are the GC rule and the column family ID already
# stored on this instance.
client._table_stub.ModifyColumnFamilies(request_pb,
client.timeout_seconds)
def delete(self):
"""Delete this column family."""
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=self._table.name)
request_pb.modifications.add(
id=self.column_family_id,
drop=True)
client = self._table._instance._client
# We expect a `google.protobuf.empty_pb2.Empty`
client._table_stub.ModifyColumnFamilies(request_pb,
client.timeout_seconds)
def _gc_rule_from_pb(gc_rule_pb):
"""Convert a protobuf GC rule to a native object.
:type gc_rule_pb: :class:`.table_v2_pb2.GcRule`
:param gc_rule_pb: The GC rule to convert.
:rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>`
:returns: An instance of one of the native rules defined
in :mod:`column_family` or :data:`None` if no values were
set on the protobuf passed in.
:raises: :class:`ValueError <exceptions.ValueError>` if the rule name
is unexpected.
"""
rule_name = gc_rule_pb.WhichOneof('rule')
if rule_name is None:
return None
if rule_name == 'max_num_versions':
return MaxVersionsGCRule(gc_rule_pb.max_num_versions)
elif rule_name == 'max_age':
max_age = _duration_pb_to_timedelta(gc_rule_pb.max_age)
return MaxAgeGCRule(max_age)
elif rule_name == 'union':
return GCRuleUnion([_gc_rule_from_pb(rule)
for rule in gc_rule_pb.union.rules])
elif rule_name == 'intersection':
rules = [_gc_rule_from_pb(rule)
for rule in gc_rule_pb.intersection.rules]
return GCRuleIntersection(rules)
else:
raise ValueError('Unexpected rule name', rule_name)
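# ---------------------------------------------------------------------------
# Illustrative sketch (appended for this document, relying on the imports
# above): composing GC rules and round-tripping them through the protobuf
# representation used by ColumnFamily.create() / update().
_max_versions_rule = MaxVersionsGCRule(3)
_max_age_rule = MaxAgeGCRule(datetime.timedelta(days=1))
_combined_rule = GCRuleIntersection(rules=[_max_versions_rule, _max_age_rule])
# to_pb() nests the child rules inside a single table_v2_pb2.GcRule message.
_gc_rule_pb = _combined_rule.to_pb()
# _gc_rule_from_pb() converts the protobuf back to the native classes.
assert _gc_rule_from_pb(_gc_rule_pb) == _combined_rule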

View file

@ -0,0 +1,167 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase package.
This package is intended to emulate the HappyBase library using
Google Cloud Bigtable as the backing store.
Differences in Public API
-------------------------
Some concepts from HBase/Thrift do not map directly to the Cloud
Bigtable API. As a result, the following instance methods and functions
could not be implemented:
* :meth:`Connection.enable_table() \
<gcloud.bigtable.happybase.connection.Connection.enable_table>` - no
concept of enabled/disabled
* :meth:`Connection.disable_table() \
<gcloud.bigtable.happybase.connection.Connection.disable_table>` - no
concept of enabled/disabled
* :meth:`Connection.is_table_enabled() \
<gcloud.bigtable.happybase.connection.Connection.is_table_enabled>`
- no concept of enabled/disabled
* :meth:`Connection.compact_table() \
<gcloud.bigtable.happybase.connection.Connection.compact_table>` -
table storage is opaque to user
* :meth:`Table.regions() <gcloud.bigtable.happybase.table.Table.regions>`
- tables in Cloud Bigtable do not expose internal storage details
* :meth:`Table.counter_set() \
<gcloud.bigtable.happybase.table.Table.counter_set>` - method can't
be atomic, so we disable it
* The ``__version__`` value for the HappyBase package is :data:`None`.
However, it's worth noting that this implementation was based on
HappyBase 0.9.
In addition, many of the constants from
:mod:`connection <gcloud.bigtable.happybase.connection>`
are specific to HBase and are defined as :data:`None` in our module:
* ``COMPAT_MODES``
* ``THRIFT_TRANSPORTS``
* ``THRIFT_PROTOCOLS``
* ``DEFAULT_HOST``
* ``DEFAULT_PORT``
* ``DEFAULT_TRANSPORT``
* ``DEFAULT_COMPAT``
* ``DEFAULT_PROTOCOL``
Two of these, ``DEFAULT_HOST`` and ``DEFAULT_PORT``, are even imported in
the main :mod:`happybase <gcloud.bigtable.happybase>` package.
Finally, we do not provide the ``util`` module. Though it is public in the
HappyBase library, it provides no core functionality.
API Behavior Changes
--------------------
* Since there is no concept of an enabled / disabled table, calling
:meth:`Connection.delete_table() \
<gcloud.bigtable.happybase.connection.Connection.delete_table>`
with ``disable=True`` can't be supported.
Using that argument will result in a warning.
* The :class:`Connection <gcloud.bigtable.happybase.connection.Connection>`
constructor **disables** the use of several
arguments and will print a warning if any of them are passed in as keyword
arguments. The arguments are:
* ``host``
* ``port``
* ``compat``
* ``transport``
* ``protocol``
* In order to make
:class:`Connection <gcloud.bigtable.happybase.connection.Connection>`
compatible with Cloud Bigtable, we add an ``instance`` keyword argument to
allow users to pass in their own
:class:`Instance <gcloud.bigtable.instance.Instance>` (which they can
construct beforehand).
For example:
.. code:: python
from gcloud.bigtable.client import Client
client = Client(project=PROJECT_ID, admin=True)
instance = client.instance(instance_id, location_id)
instance.reload()
from gcloud.bigtable.happybase import Connection
connection = Connection(instance=instance)
* Any uses of the ``wal`` (Write Ahead Log) argument will result in a
warning as well. This includes uses in:
* :class:`Batch <gcloud.bigtable.happybase.batch.Batch>`
* :meth:`Batch.put() <gcloud.bigtable.happybase.batch.Batch.put>`
* :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>`
* :meth:`Table.put() <gcloud.bigtable.happybase.table.Table.put>`
* :meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>`
* :meth:`Table.batch() <gcloud.bigtable.happybase.table.Table.batch>` factory
* When calling
:meth:`Connection.create_table() \
<gcloud.bigtable.happybase.connection.Connection.create_table>`, the
majority of HBase column family options cannot be used. Among them:
* ``max_versions``
* ``compression``
* ``in_memory``
* ``bloom_filter_type``
* ``bloom_filter_vector_size``
* ``bloom_filter_nb_hashes``
* ``block_cache_enabled``
* ``time_to_live``
Only ``max_versions`` and ``time_to_live`` are available in Cloud Bigtable
(as
:class:`MaxVersionsGCRule <gcloud.bigtable.column_family.MaxVersionsGCRule>`
and
:class:`MaxAgeGCRule <gcloud.bigtable.column_family.MaxAgeGCRule>`).
In addition to using a dictionary for specifying column family options,
we also accept instances of :class:`.GarbageCollectionRule` or subclasses.
* :meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` no longer
accepts the following arguments (which will result in a warning):
* ``batch_size``
* ``scan_batching``
* ``sorted_columns``
* Using a HBase filter string in
:meth:`Table.scan() <gcloud.bigtable.happybase.table.Table.scan>` is
not possible with Cloud Bigtable and will result in a
:class:`TypeError <exceptions.TypeError>`. However, the method now accepts
instances of :class:`.RowFilter` and subclasses.
* :meth:`Batch.delete() <gcloud.bigtable.happybase.batch.Batch.delete>` (and
hence
:meth:`Table.delete() <gcloud.bigtable.happybase.table.Table.delete>`)
will fail with a :class:`ValueError <exceptions.ValueError>` when either a
row or column family delete is attempted with a ``timestamp``. This is
because the Cloud Bigtable API uses the ``DeleteFromFamily`` and
``DeleteFromRow`` mutations for these deletes, and neither of these
mutations support a timestamp.
"""
from gcloud.bigtable.happybase.batch import Batch
from gcloud.bigtable.happybase.connection import Connection
from gcloud.bigtable.happybase.connection import DEFAULT_HOST
from gcloud.bigtable.happybase.connection import DEFAULT_PORT
from gcloud.bigtable.happybase.pool import ConnectionPool
from gcloud.bigtable.happybase.pool import NoConnectionsAvailable
from gcloud.bigtable.happybase.table import Table
# Values from HappyBase that we don't reproduce / are not relevant.
__version__ = None
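# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above). PROJECT_ID, the
# instance ID and the location ID are placeholders; admin=True is required
# because table creation uses the Table Admin API.
from gcloud.bigtable.client import Client
from gcloud.bigtable.happybase import Connection

PROJECT_ID = 'my-project'
LOCATION_ID = 'projects/my-project/locations/us-central1-c'

client = Client(project=PROJECT_ID, admin=True)
instance = client.instance('my-instance', LOCATION_ID)
connection = Connection(instance=instance)  # opens the copied client
# Only max_versions and time_to_live survive the translation to Cloud
# Bigtable GC rules; other HBase options would merely trigger a warning.
connection.create_table(
    'mytable',
    {'cf1': {'max_versions': 10},
     'cf2': {'time_to_live': 3600}},
)
connection.close()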

View file

@ -0,0 +1,326 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase batch module."""
import datetime
import warnings
import six
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable.row_filters import TimestampRange
_WAL_SENTINEL = object()
# Assumed granularity of timestamps in Cloud Bigtable.
_ONE_MILLISECOND = datetime.timedelta(microseconds=1000)
_WARN = warnings.warn
_WAL_WARNING = ('The wal argument (Write-Ahead-Log) is not '
'supported by Cloud Bigtable.')
class Batch(object):
"""Batch class for accumulating mutations.
.. note::
When using a batch with ``transaction=False`` as a context manager
(i.e. in a ``with`` statement), mutations will still be sent as
row mutations even if the context manager exits with an error.
This behavior is in place to match the behavior in the HappyBase
HBase / Thrift implementation.
:type table: :class:`Table <gcloud.bigtable.happybase.table.Table>`
:param table: The table where mutations will be applied.
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the epoch)
that all mutations will be applied at.
:type batch_size: int
:param batch_size: (Optional) The maximum number of mutations to allow
to accumulate before committing them.
:type transaction: bool
:param transaction: Flag indicating if the mutations should be sent
transactionally or not. If ``transaction=True`` and
an error occurs while a :class:`Batch` is active,
then none of the accumulated mutations will be
committed. If ``batch_size`` is set, the mutation
can't be transactional.
:type wal: object
:param wal: Unused parameter (Boolean for using the HBase Write Ahead Log).
Provided for compatibility with HappyBase, but irrelevant for
Cloud Bigtable since it does not have a Write Ahead Log.
:raises: :class:`TypeError <exceptions.TypeError>` if ``batch_size``
is set and ``transaction=True``.
:class:`ValueError <exceptions.ValueError>` if ``batch_size``
is not positive.
"""
def __init__(self, table, timestamp=None, batch_size=None,
transaction=False, wal=_WAL_SENTINEL):
if wal is not _WAL_SENTINEL:
_WARN(_WAL_WARNING)
if batch_size is not None:
if transaction:
raise TypeError('When batch_size is set, a Batch cannot be '
'transactional')
if batch_size <= 0:
raise ValueError('batch_size must be positive')
self._table = table
self._batch_size = batch_size
self._timestamp = self._delete_range = None
# Timestamp is in milliseconds, convert to microseconds.
if timestamp is not None:
self._timestamp = _datetime_from_microseconds(1000 * timestamp)
# For deletes, we get the very next timestamp (assuming timestamp
# granularity is milliseconds). This is because HappyBase users
# expect HBase deletes to go **up to** and **including** the
# timestamp while Cloud Bigtable Time Ranges **exclude** the
# final timestamp.
next_timestamp = self._timestamp + _ONE_MILLISECOND
self._delete_range = TimestampRange(end=next_timestamp)
self._transaction = transaction
# Internal state for tracking mutations.
self._row_map = {}
self._mutation_count = 0
def send(self):
"""Send / commit the batch of mutations to the server."""
for row in self._row_map.values():
# commit() does nothing if row hasn't accumulated any mutations.
row.commit()
self._row_map.clear()
self._mutation_count = 0
def _try_send(self):
"""Send / commit the batch if mutations have exceeded batch size."""
if self._batch_size and self._mutation_count >= self._batch_size:
self.send()
def _get_row(self, row_key):
"""Gets a row that will hold mutations.
If the row is not already cached on the current batch, a new row will
be created.
:type row_key: str
:param row_key: The row key for a row stored in the map.
:rtype: :class:`Row <gcloud.bigtable.row.Row>`
:returns: The newly created or stored row that will hold mutations.
"""
if row_key not in self._row_map:
table = self._table._low_level_table
self._row_map[row_key] = table.row(row_key)
return self._row_map[row_key]
def put(self, row, data, wal=_WAL_SENTINEL):
"""Insert data into a row in the table owned by this batch.
:type row: str
:param row: The row key where the mutation will be "put".
:type data: dict
:param data: Dictionary containing the data to be inserted. The keys
are column names (of the form ``fam:col``) and the values
are strings (bytes) to be stored in those columns.
:type wal: object
:param wal: Unused parameter (to override the default on the
instance). Provided for compatibility with HappyBase, but
irrelevant for Cloud Bigtable since it does not have a
Write Ahead Log.
"""
if wal is not _WAL_SENTINEL:
_WARN(_WAL_WARNING)
row_object = self._get_row(row)
# Make sure all the keys are valid before beginning
# to add mutations.
column_pairs = _get_column_pairs(six.iterkeys(data),
require_qualifier=True)
for column_family_id, column_qualifier in column_pairs:
value = data[column_family_id + ':' + column_qualifier]
row_object.set_cell(column_family_id, column_qualifier,
value, timestamp=self._timestamp)
self._mutation_count += len(data)
self._try_send()
def _delete_columns(self, columns, row_object):
"""Adds delete mutations for a list of columns and column families.
:type columns: list
:param columns: Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type row_object: :class:`Row <gcloud.bigtable.row.Row>`
:param row_object: The row which will hold the delete mutations.
:raises: :class:`ValueError <exceptions.ValueError>` if the delete
timestamp range is set on the current batch, but a
column family delete is attempted.
"""
column_pairs = _get_column_pairs(columns)
for column_family_id, column_qualifier in column_pairs:
if column_qualifier is None:
if self._delete_range is not None:
raise ValueError('The Cloud Bigtable API does not support '
'adding a timestamp to '
'"DeleteFromFamily" ')
row_object.delete_cells(column_family_id,
columns=row_object.ALL_COLUMNS)
else:
row_object.delete_cell(column_family_id,
column_qualifier,
time_range=self._delete_range)
def delete(self, row, columns=None, wal=_WAL_SENTINEL):
"""Delete data from a row in the table owned by this batch.
:type row: str
:param row: The row key where the delete will occur.
:type columns: list
:param columns: (Optional) Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
If not used, will delete the entire row.
:type wal: object
:param wal: Unused parameter (to override the default on the
instance). Provided for compatibility with HappyBase, but
irrelevant for Cloud Bigtable since it does not have a
Write Ahead Log.
:raises: :class:`ValueError <exceptions.ValueError>` if the delete
timestamp range is set on the current batch, but a full row delete
is attempted.
"""
if wal is not _WAL_SENTINEL:
_WARN(_WAL_WARNING)
row_object = self._get_row(row)
if columns is None:
# Delete entire row.
if self._delete_range is not None:
raise ValueError('The Cloud Bigtable API does not support '
'adding a timestamp to "DeleteFromRow" '
'mutations')
row_object.delete()
self._mutation_count += 1
else:
self._delete_columns(columns, row_object)
self._mutation_count += len(columns)
self._try_send()
def __enter__(self):
"""Enter context manager, no set-up required."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit context manager, no set-up required.
:type exc_type: type
:param exc_type: The type of the exception if one occurred while the
context manager was active. Otherwise, :data:`None`.
:type exc_value: :class:`Exception <exceptions.Exception>`
:param exc_value: An instance of ``exc_type`` if an exception occurred
while the context was active.
Otherwise, :data:`None`.
:type traceback: ``traceback`` type
:param traceback: The traceback where the exception occurred (if one
did occur). Otherwise, :data:`None`.
"""
# If the context manager encountered an exception and the batch is
# transactional, we don't commit the mutations.
if self._transaction and exc_type is not None:
return
# NOTE: For non-transactional batches, this will even commit mutations
# if an error occurred during the context manager.
self.send()
def _get_column_pairs(columns, require_qualifier=False):
"""Turns a list of column or column families into parsed pairs.
Turns a column family (``fam`` or ``fam:``) into a pair such
as ``['fam', None]`` and turns a column (``fam:col``) into
``['fam', 'col']``.
:type columns: list
:param columns: Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type require_qualifier: bool
:param require_qualifier: Boolean indicating if the columns should
all have a qualifier or not.
:rtype: list
:returns: List of pairs, where the first element in each pair is the
column family and the second is the column qualifier
(or :data:`None`).
:raises: :class:`ValueError <exceptions.ValueError>` if any of the columns
are not of the expected format.
:class:`ValueError <exceptions.ValueError>` if
``require_qualifier`` is :data:`True` and one of the values is
for an entire column family
"""
column_pairs = []
for column in columns:
if isinstance(column, six.binary_type):
column = column.decode('utf-8')
# Remove trailing colons (i.e. for standalone column family).
if column.endswith(u':'):
column = column[:-1]
num_colons = column.count(u':')
if num_colons == 0:
# column is a column family.
if require_qualifier:
raise ValueError('column does not contain a qualifier',
column)
else:
column_pairs.append([column, None])
elif num_colons == 1:
column_pairs.append(column.split(u':'))
else:
raise ValueError('Column contains the : separator more than once')
return column_pairs
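# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above).
#
# _get_column_pairs() is the helper put() and delete() use to split column
# names into (family, qualifier) pairs:
assert _get_column_pairs(['cf1', 'cf2:', 'cf3:col']) == [
    ['cf1', None], ['cf2', None], ['cf3', 'col']]


def _batch_usage_sketch(table):
    """Hypothetical helper showing Batch usage.

    ``table`` is assumed to be an already-connected
    gcloud.bigtable.happybase.table.Table; the row keys, column names and
    values are placeholders.
    """
    # Mutations accumulate and are sent when batch_size is reached or when
    # the context manager exits.
    with Batch(table, batch_size=20) as batch:
        batch.put('row-key-1', {'cf:col1': 'value1', 'cf:col2': 'value2'})
        batch.delete('row-key-2', columns=['cf:col1'])
        batch.delete('row-key-3')  # no columns given: delete the whole row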

View file

@ -0,0 +1,484 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase connection module."""
import datetime
import warnings
import six
from grpc.beta import interfaces
from grpc.framework.interfaces.face import face
try:
from happybase.hbase.ttypes import AlreadyExists
except ImportError:
from gcloud.exceptions import Conflict as AlreadyExists
from gcloud.bigtable.client import Client
from gcloud.bigtable.column_family import GCRuleIntersection
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
from gcloud.bigtable.happybase.table import Table
from gcloud.bigtable.table import Table as _LowLevelTable
# Constants reproduced here for HappyBase compatibility, though values
# are all null.
COMPAT_MODES = None
THRIFT_TRANSPORTS = None
THRIFT_PROTOCOLS = None
DEFAULT_HOST = None
DEFAULT_PORT = None
DEFAULT_TRANSPORT = None
DEFAULT_COMPAT = None
DEFAULT_PROTOCOL = None
_LEGACY_ARGS = frozenset(('host', 'port', 'compat', 'transport', 'protocol'))
_WARN = warnings.warn
_DISABLE_DELETE_MSG = ('The disable argument should not be used in '
'delete_table(). Cloud Bigtable has no concept '
'of enabled / disabled tables.')
def _get_instance(timeout=None):
"""Gets instance for the default project.
Creates a client with the inferred credentials and project ID from
the local environment. Then uses
:meth:`.bigtable.client.Client.list_instances` to
get the unique instance owned by the project.
If the request fails for any reason, or if there isn't exactly one instance
owned by the project, then this function will fail.
:type timeout: int
:param timeout: (Optional) The socket timeout in milliseconds.
:rtype: :class:`gcloud.bigtable.instance.Instance`
:returns: The unique instance owned by the project inferred from
the environment.
:raises: :class:`ValueError <exceptions.ValueError>` if there is a failed
location or any number of instances other than one.
"""
client_kwargs = {'admin': True}
if timeout is not None:
client_kwargs['timeout_seconds'] = timeout / 1000.0
client = Client(**client_kwargs)
try:
client.start()
instances, failed_locations = client.list_instances()
finally:
client.stop()
if len(failed_locations) != 0:
raise ValueError('Determining instance via ListInstances encountered '
'failed locations.')
if len(instances) == 0:
raise ValueError('This client doesn\'t have access to any instances.')
if len(instances) > 1:
raise ValueError('This client has access to more than one instance. '
'Please directly pass the instance you\'d '
'like to use.')
return instances[0]
class Connection(object):
"""Connection to Cloud Bigtable backend.
.. note::
If you pass an ``instance``, it will be :meth:`.Instance.copy`-ed before
being stored on the new connection. This also copies the
:class:`Client <gcloud.bigtable.client.Client>` that created the
:class:`Instance <gcloud.bigtable.instance.Instance>` instance and the
:class:`Credentials <oauth2client.client.Credentials>` stored on the
client.
The arguments ``host``, ``port``, ``compat``, ``transport`` and
``protocol`` are allowed (as keyword arguments) for compatibility with
HappyBase. However, they will not be used in any way, and will cause a
warning if passed.
:type timeout: int
:param timeout: (Optional) The socket timeout in milliseconds.
:type autoconnect: bool
:param autoconnect: (Optional) Whether the connection should be
:meth:`open`-ed during construction.
:type table_prefix: str
:param table_prefix: (Optional) Prefix used to construct table names.
:type table_prefix_separator: str
:param table_prefix_separator: (Optional) Separator used with
``table_prefix``. Defaults to ``_``.
:type instance: :class:`Instance <gcloud.bigtable.instance.Instance>`
:param instance: (Optional) A Cloud Bigtable instance. The instance also
owns a client for making gRPC requests to the Cloud
Bigtable API. If not passed in, defaults to creating client
with ``admin=True`` and using the ``timeout`` here for the
``timeout_seconds`` argument to the
:class:`Client <gcloud.bigtable.client.Client>`
constructor. The credentials for the client
will be the implicit ones loaded from the environment.
Then that client is used to retrieve all the instances
owned by the client's project.
:type kwargs: dict
:param kwargs: Remaining keyword arguments. Provided for HappyBase
compatibility.
"""
_instance = None
def __init__(self, timeout=None, autoconnect=True, table_prefix=None,
table_prefix_separator='_', instance=None, **kwargs):
self._handle_legacy_args(kwargs)
if table_prefix is not None:
if not isinstance(table_prefix, six.string_types):
raise TypeError('table_prefix must be a string', 'received',
table_prefix, type(table_prefix))
if not isinstance(table_prefix_separator, six.string_types):
raise TypeError('table_prefix_separator must be a string',
'received', table_prefix_separator,
type(table_prefix_separator))
self.table_prefix = table_prefix
self.table_prefix_separator = table_prefix_separator
if instance is None:
self._instance = _get_instance(timeout=timeout)
else:
if timeout is not None:
raise ValueError('Timeout cannot be used when an existing '
'instance is passed')
self._instance = instance.copy()
if autoconnect:
self.open()
self._initialized = True
@staticmethod
def _handle_legacy_args(arguments_dict):
"""Check legacy HappyBase arguments and warn if set.
:type arguments_dict: dict
:param arguments_dict: Unused keyword arguments.
:raises: :class:`TypeError <exceptions.TypeError>` if a keyword other
than ``host``, ``port``, ``compat``, ``transport`` or
``protocol`` is used.
"""
common_args = _LEGACY_ARGS.intersection(six.iterkeys(arguments_dict))
if common_args:
all_args = ', '.join(common_args)
message = ('The HappyBase legacy arguments %s were used. These '
'arguments are unused by gcloud.' % (all_args,))
_WARN(message)
for arg_name in common_args:
arguments_dict.pop(arg_name)
if arguments_dict:
unexpected_names = arguments_dict.keys()
raise TypeError('Received unexpected arguments', unexpected_names)
def open(self):
"""Open the underlying transport to Cloud Bigtable.
This method opens the underlying HTTP/2 gRPC connection using a
:class:`Client <gcloud.bigtable.client.Client>` bound to the
:class:`Instance <gcloud.bigtable.instance.Instance>` owned by
this connection.
"""
self._instance._client.start()
def close(self):
"""Close the underlying transport to Cloud Bigtable.
This method closes the underlying HTTP/2 gRPC connection using a
:class:`Client <gcloud.bigtable.client.Client>` bound to the
:class:`Instance <gcloud.bigtable.instance.Instance>` owned by
this connection.
"""
self._instance._client.stop()
def __del__(self):
if self._instance is not None:
self.close()
def _table_name(self, name):
"""Construct a table name by optionally adding a table name prefix.
:type name: str
:param name: The name to have a prefix added to it.
:rtype: str
:returns: The prefixed name, if the current connection has a table
prefix set.
"""
if self.table_prefix is None:
return name
return self.table_prefix + self.table_prefix_separator + name
def table(self, name, use_prefix=True):
"""Table factory.
:type name: str
:param name: The name of the table to be constructed.
:type use_prefix: bool
:param use_prefix: Whether to use the table prefix (if any).
:rtype: :class:`Table <gcloud.bigtable.happybase.table.Table>`
:returns: Table instance owned by this connection.
"""
if use_prefix:
name = self._table_name(name)
return Table(name, self)
def tables(self):
"""Return a list of table names available to this connection.
.. note::
This lists every table in the instance owned by this connection,
**not** every table that a given user may have access to.
.. note::
If ``table_prefix`` is set on this connection, only returns the
table names which match that prefix.
:rtype: list
:returns: List of string table names.
"""
low_level_table_instances = self._instance.list_tables()
table_names = [table_instance.table_id
for table_instance in low_level_table_instances]
# Filter using prefix, and strip prefix from names
if self.table_prefix is not None:
prefix = self._table_name('')
offset = len(prefix)
table_names = [name[offset:] for name in table_names
if name.startswith(prefix)]
return table_names
def create_table(self, name, families):
"""Create a table.
.. warning::
The only column family options from HappyBase that are able to be
used with Cloud Bigtable are ``max_versions`` and ``time_to_live``.
.. note::
This method is **not** atomic. The Cloud Bigtable API separates
the creation of a table from the creation of column families. Thus
this method needs to send 1 request for the table creation and 1
request for each column family. If any of these fails, the method
will fail, but the progress made towards completion cannot be
rolled back.
Values in ``families`` represent column family options. In HappyBase,
these are dictionaries, corresponding to the ``ColumnDescriptor``
structure in the Thrift API. The accepted keys are:
* ``max_versions`` (``int``)
* ``compression`` (``str``)
* ``in_memory`` (``bool``)
* ``bloom_filter_type`` (``str``)
* ``bloom_filter_vector_size`` (``int``)
* ``bloom_filter_nb_hashes`` (``int``)
* ``block_cache_enabled`` (``bool``)
* ``time_to_live`` (``int``)
:type name: str
:param name: The name of the table to be created.
:type families: dict
:param families: Dictionary with column family names as keys and column
family options as the values. The options can be among
* :class:`dict`
* :class:`.GarbageCollectionRule`
:raises: :class:`TypeError <exceptions.TypeError>` if ``families`` is
not a dictionary,
:class:`ValueError <exceptions.ValueError>` if ``families``
has no entries
"""
if not isinstance(families, dict):
raise TypeError('families arg must be a dictionary')
if not families:
raise ValueError('Cannot create table %r (no column '
'families specified)' % (name,))
# Parse all keys before making any API requests.
gc_rule_dict = {}
for column_family_name, option in families.items():
if isinstance(column_family_name, six.binary_type):
column_family_name = column_family_name.decode('utf-8')
if column_family_name.endswith(':'):
column_family_name = column_family_name[:-1]
gc_rule_dict[column_family_name] = _parse_family_option(option)
# Create table instance and then make API calls.
name = self._table_name(name)
low_level_table = _LowLevelTable(name, self._instance)
try:
low_level_table.create()
except face.NetworkError as network_err:
if network_err.code == interfaces.StatusCode.ALREADY_EXISTS:
raise AlreadyExists(name)
else:
raise
for column_family_name, gc_rule in gc_rule_dict.items():
column_family = low_level_table.column_family(
column_family_name, gc_rule=gc_rule)
column_family.create()
def delete_table(self, name, disable=False):
"""Delete the specified table.
:type name: str
:param name: The name of the table to be deleted. If ``table_prefix``
is set, a prefix will be added to the ``name``.
:type disable: bool
:param disable: Whether to first disable the table if needed. This
is provided for compatibility with HappyBase, but is
not relevant for Cloud Bigtable since it has no concept
of enabled / disabled tables.
"""
if disable:
_WARN(_DISABLE_DELETE_MSG)
name = self._table_name(name)
_LowLevelTable(name, self._instance).delete()
def enable_table(self, name):
"""Enable the specified table.
.. warning::
Cloud Bigtable has no concept of enabled / disabled tables so this
method does not work. It is provided simply for compatibility.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('The Cloud Bigtable API has no concept of '
'enabled or disabled tables.')
def disable_table(self, name):
"""Disable the specified table.
.. warning::
Cloud Bigtable has no concept of enabled / disabled tables so this
method does not work. It is provided simply for compatibility.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('The Cloud Bigtable API has no concept of '
'enabled or disabled tables.')
def is_table_enabled(self, name):
"""Return whether the specified table is enabled.
.. warning::
Cloud Bigtable has no concept of enabled / disabled tables so this
method does not work. It is provided simply for compatibility.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('The Cloud Bigtable API has no concept of '
'enabled or disabled tables.')
def compact_table(self, name, major=False):
"""Compact the specified table.
.. warning::
Cloud Bigtable does not support compacting a table, so this
method does not work. It is provided simply for compatibility.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('The Cloud Bigtable API does not support '
'compacting a table.')
def _parse_family_option(option):
"""Parses a column family option into a garbage collection rule.
.. note::
If ``option`` is not a dictionary, the type is not checked.
If ``option`` is :data:`None`, there is nothing to do, since this
is the correct output.
:type option: :class:`dict`,
:data:`NoneType <types.NoneType>`,
:class:`.GarbageCollectionRule`
:param option: A column family option passed as a dictionary value in
:meth:`Connection.create_table`.
:rtype: :class:`.GarbageCollectionRule`
:returns: A garbage collection rule parsed from the input.
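For example (a sketch, not a doctest; the option values are illustrative)::

    _parse_family_option({'max_versions': 2})
    # -> MaxVersionsGCRule(2)
    _parse_family_option({'max_versions': 2, 'time_to_live': 86400})
    # -> GCRuleIntersection of a MaxAgeGCRule (1 day) and a
    #    MaxVersionsGCRule(2)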
"""
result = option
if isinstance(result, dict):
if not set(result.keys()) <= set(['max_versions', 'time_to_live']):
all_keys = ', '.join(repr(key) for key in result.keys())
warning_msg = ('Cloud Bigtable only supports max_versions and '
'time_to_live column family settings. '
'Received: %s' % (all_keys,))
_WARN(warning_msg)
max_num_versions = result.get('max_versions')
max_age = None
if 'time_to_live' in result:
max_age = datetime.timedelta(seconds=result['time_to_live'])
versions_rule = age_rule = None
if max_num_versions is not None:
versions_rule = MaxVersionsGCRule(max_num_versions)
if max_age is not None:
age_rule = MaxAgeGCRule(max_age)
if versions_rule is None:
result = age_rule
else:
if age_rule is None:
result = versions_rule
else:
result = GCRuleIntersection(rules=[age_rule, versions_rule])
return result

View file

@ -0,0 +1,153 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase pool module."""
import contextlib
import threading
import six
from gcloud.bigtable.happybase.connection import Connection
from gcloud.bigtable.happybase.connection import _get_instance
_MIN_POOL_SIZE = 1
"""Minimum allowable size of a connection pool."""
class NoConnectionsAvailable(RuntimeError):
"""Exception raised when no connections are available.
This happens if a timeout was specified when obtaining a connection,
and no connection became available within the specified timeout.
"""
class ConnectionPool(object):
"""Thread-safe connection pool.
.. note::
All keyword arguments are passed unmodified to the
:class:`Connection <.happybase.connection.Connection>` constructor
**except** for ``autoconnect``. This is because the ``open`` /
``closed`` status of a connection is managed by the pool. In addition,
if ``instance`` is not passed, the default / inferred instance is
determined by the pool and then passed to each
:class:`Connection <.happybase.connection.Connection>` that is created.
:type size: int
:param size: The maximum number of concurrently open connections.
:type kwargs: dict
:param kwargs: Keyword arguments passed to
:class:`Connection <.happybase.Connection>`
constructor.
:raises: :class:`TypeError <exceptions.TypeError>` if ``size``
is not an integer.
:class:`ValueError <exceptions.ValueError>` if ``size``
is not positive.
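A short usage sketch (the pool size and timeout values are illustrative
only)::

    pool = ConnectionPool(size=10)
    with pool.connection(timeout=5) as connection:
        pass  # use the connection while inside the block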
"""
def __init__(self, size, **kwargs):
if not isinstance(size, six.integer_types):
raise TypeError('Pool size arg must be an integer')
if size < _MIN_POOL_SIZE:
raise ValueError('Pool size must be positive')
self._lock = threading.Lock()
self._queue = six.moves.queue.LifoQueue(maxsize=size)
self._thread_connections = threading.local()
connection_kwargs = kwargs
connection_kwargs['autoconnect'] = False
if 'instance' not in connection_kwargs:
connection_kwargs['instance'] = _get_instance(
timeout=kwargs.get('timeout'))
for _ in six.moves.range(size):
connection = Connection(**connection_kwargs)
self._queue.put(connection)
def _acquire_connection(self, timeout=None):
"""Acquire a connection from the pool.
:type timeout: int
:param timeout: (Optional) Time (in seconds) to wait for a connection
to open.
:rtype: :class:`Connection <.happybase.Connection>`
:returns: An active connection from the queue stored on the pool.
:raises: :class:`NoConnectionsAvailable` if ``Queue.get`` fails
before the ``timeout`` (only if a timeout is specified).
"""
try:
return self._queue.get(block=True, timeout=timeout)
except six.moves.queue.Empty:
raise NoConnectionsAvailable('No connection available from pool '
'within specified timeout')
@contextlib.contextmanager
def connection(self, timeout=None):
"""Obtain a connection from the pool.
Must be used as a context manager, for example::
with pool.connection() as connection:
pass # do something with the connection
If ``timeout`` is omitted, this method waits forever for a connection
to become available from the local queue.
:type timeout: int
:param timeout: (Optional) Time (in seconds) to wait for a connection
to open.
:rtype: :class:`Connection <.happybase.connection.Connection>`
:returns: An active connection from the pool.
:raises: :class:`NoConnectionsAvailable` if no connection can be
retrieved from the pool before the ``timeout`` (only if
a timeout is specified).
"""
connection = getattr(self._thread_connections, 'current', None)
retrieved_new_cnxn = False
if connection is None:
# In this case we need to actually grab a connection from the
# pool. After retrieval, the connection is stored on a thread
# local so that nested connection requests from the same
# thread can re-use the same connection instance.
#
# NOTE: This code acquires a lock before assigning to the
# thread local; see
# ('https://emptysqua.re/blog/'
# 'another-thing-about-pythons-threadlocals/')
retrieved_new_cnxn = True
connection = self._acquire_connection(timeout)
with self._lock:
self._thread_connections.current = connection
# This is a no-op for connections that have already been opened
# since they just call Client.start().
connection.open()
yield connection
# Remove thread local reference after the outermost 'with' block
# ends. Afterwards the thread no longer owns the connection.
if retrieved_new_cnxn:
del self._thread_connections.current
self._queue.put(connection)

View file

@ -0,0 +1,980 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase table module."""
import struct
import warnings
import six
from gcloud._helpers import _datetime_from_microseconds
from gcloud._helpers import _microseconds_from_datetime
from gcloud._helpers import _to_bytes
from gcloud._helpers import _total_seconds
from gcloud.bigtable.column_family import GCRuleIntersection
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
from gcloud.bigtable.happybase.batch import _get_column_pairs
from gcloud.bigtable.happybase.batch import _WAL_SENTINEL
from gcloud.bigtable.happybase.batch import Batch
from gcloud.bigtable.row_filters import CellsColumnLimitFilter
from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter
from gcloud.bigtable.row_filters import FamilyNameRegexFilter
from gcloud.bigtable.row_filters import RowFilterChain
from gcloud.bigtable.row_filters import RowFilterUnion
from gcloud.bigtable.row_filters import RowKeyRegexFilter
from gcloud.bigtable.row_filters import TimestampRange
from gcloud.bigtable.row_filters import TimestampRangeFilter
from gcloud.bigtable.table import Table as _LowLevelTable
_WARN = warnings.warn
_UNPACK_I64 = struct.Struct('>q').unpack
_SIMPLE_GC_RULES = (MaxAgeGCRule, MaxVersionsGCRule)
def make_row(cell_map, include_timestamp):
"""Make a row dict for a Thrift cell mapping.
.. warning::
This method is only provided for HappyBase compatibility, but does not
actually work.
:type cell_map: dict
:param cell_map: Dictionary with ``fam:col`` strings as keys and ``TCell``
instances as values.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('The Cloud Bigtable API output is not the same '
'as the output from the Thrift server, so this '
'helper can not be implemented.', 'Called with',
cell_map, include_timestamp)
def make_ordered_row(sorted_columns, include_timestamp):
"""Make a row dict for sorted Thrift column results from scans.
.. warning::
This method is only provided for HappyBase compatibility, but does not
actually work.
:type sorted_columns: list
:param sorted_columns: List of ``TColumn`` instances from Thrift.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('The Cloud Bigtable API output is not the same '
'as the output from the Thrift server, so this '
'helper can not be implemented.', 'Called with',
sorted_columns, include_timestamp)
class Table(object):
"""Representation of Cloud Bigtable table.
Used for adding data and
:type name: str
:param name: The name of the table.
:type connection: :class:`Connection <.happybase.connection.Connection>`
:param connection: The connection which has access to the table.
"""
def __init__(self, name, connection):
self.name = name
# This remains as legacy for HappyBase, but only the instance
# from the connection is needed.
self.connection = connection
self._low_level_table = None
if self.connection is not None:
self._low_level_table = _LowLevelTable(self.name,
self.connection._instance)
def __repr__(self):
return '<table.Table name=%r>' % (self.name,)
def families(self):
"""Retrieve the column families for this table.
:rtype: dict
:returns: Mapping from column family name to garbage collection rule
for a column family.
"""
column_family_map = self._low_level_table.list_column_families()
result = {}
for col_fam, col_fam_obj in six.iteritems(column_family_map):
result[col_fam] = _gc_rule_to_dict(col_fam_obj.gc_rule)
return result
def regions(self):
"""Retrieve the regions for this table.
.. warning::
Cloud Bigtable does not give information about how a table is laid
out in memory, so this method does not work. It is
provided simply for compatibility.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('The Cloud Bigtable API does not have a '
'concept of splitting a table into regions.')
def row(self, row, columns=None, timestamp=None, include_timestamp=False):
"""Retrieve a single row of data.
Returns the latest cells in each column (or all columns if ``columns``
is not specified). If a ``timestamp`` is set, then **latest** becomes
**latest** up until ``timestamp``.
:type row: str
:param row: Row key for the row we are reading from.
:type columns: list
:param columns: (Optional) Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch). If specified, only cells returned before the
timestamp will be returned.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:rtype: dict
:returns: Dictionary containing all the latest column values in
the row.
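A usage sketch (row key, column names and returned values are
illustrative only)::

    row_dict = table.row(b'row-key', columns=['fam1:col1', 'fam2'])
    # e.g. {b'fam1:col1': b'value1', b'fam2:col2': b'value2'}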
"""
filters = []
if columns is not None:
filters.append(_columns_filter_helper(columns))
# versions == 1 since we only want the latest.
filter_ = _filter_chain_helper(versions=1, timestamp=timestamp,
filters=filters)
partial_row_data = self._low_level_table.read_row(
row, filter_=filter_)
if partial_row_data is None:
return {}
return _partial_row_to_dict(partial_row_data,
include_timestamp=include_timestamp)
def rows(self, rows, columns=None, timestamp=None,
include_timestamp=False):
"""Retrieve multiple rows of data.
All optional arguments behave the same in this method as they do in
:meth:`row`.
:type rows: list
:param rows: Iterable of the row keys for the rows we are reading from.
:type columns: list
:param columns: (Optional) Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch). If specified, only cells returned before (or
at) the timestamp will be returned.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:rtype: list
:returns: A list of pairs, where the first is the row key and the
second is a dictionary with the filtered values returned.
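A usage sketch (keys, columns and values are illustrative only)::

    pairs = table.rows([b'key1', b'key2'], columns=['fam:col'])
    # e.g. [(b'key1', {b'fam:col': b'v1'}), (b'key2', {b'fam:col': b'v2'})]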
"""
if not rows:
# Avoid round-trip if the result is empty anyway
return []
filters = []
if columns is not None:
filters.append(_columns_filter_helper(columns))
filters.append(_row_keys_filter_helper(rows))
# versions == 1 since we only want the latest.
filter_ = _filter_chain_helper(versions=1, timestamp=timestamp,
filters=filters)
partial_rows_data = self._low_level_table.read_rows(filter_=filter_)
# NOTE: We could use max_loops = 1000 or some similar value to ensure
# that the stream isn't open too long.
partial_rows_data.consume_all()
result = []
for row_key in rows:
if row_key not in partial_rows_data.rows:
continue
curr_row_data = partial_rows_data.rows[row_key]
curr_row_dict = _partial_row_to_dict(
curr_row_data, include_timestamp=include_timestamp)
result.append((row_key, curr_row_dict))
return result
def cells(self, row, column, versions=None, timestamp=None,
include_timestamp=False):
"""Retrieve multiple versions of a single cell from the table.
:type row: str
:param row: Row key for the row we are reading from.
:type column: str
:param column: Column we are reading from; of the form ``fam:col``.
:type versions: int
:param versions: (Optional) The maximum number of cells to return. If
not set, returns all cells found.
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch). If specified, only cells returned before (or
at) the timestamp will be returned.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:rtype: list
:returns: List of values in the cell (with timestamps if
``include_timestamp`` is :data:`True`).
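A usage sketch (row key, column and returned values are illustrative
only)::

    values = table.cells(b'row-key', 'fam:col', versions=2,
                         include_timestamp=True)
    # e.g. [(b'newer', 1456361491927), (b'older', 1456361486255)]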
"""
filter_ = _filter_chain_helper(column=column, versions=versions,
timestamp=timestamp)
partial_row_data = self._low_level_table.read_row(row, filter_=filter_)
if partial_row_data is None:
return []
else:
cells = partial_row_data._cells
# We know that `_filter_chain_helper` has already verified that
# column will split as such.
column_family_id, column_qualifier = column.split(':')
# NOTE: We expect the only key in `cells` is `column_family_id`
# and the only key `cells[column_family_id]` is
# `column_qualifier`. But we don't check that this is true.
curr_cells = cells[column_family_id][column_qualifier]
return _cells_to_pairs(
curr_cells, include_timestamp=include_timestamp)
def scan(self, row_start=None, row_stop=None, row_prefix=None,
columns=None, timestamp=None,
include_timestamp=False, limit=None, **kwargs):
"""Create a scanner for data in this table.
This method returns a generator that can be used for looping over the
matching rows.
If ``row_prefix`` is specified, only rows with row keys matching the
prefix will be returned. If given, ``row_start`` and ``row_stop``
cannot be used.
.. note::
Both ``row_start`` and ``row_stop`` can be :data:`None` to specify
the start and the end of the table respectively. If both are
omitted, a full table scan is done. Note that this usually results
in severe performance problems.
The keyword argument ``filter`` is also supported (beyond column and
row range filters supported here). HappyBase / HBase users will have
used this as an HBase filter string. (See the `Thrift docs`_ for more
details on those filters.) However, Google Cloud Bigtable doesn't
support those filter strings so a
:class:`~gcloud.bigtable.row.RowFilter` should be used instead.
.. _Thrift docs: http://hbase.apache.org/0.94/book/thrift.html
The arguments ``batch_size``, ``scan_batching`` and ``sorted_columns``
are allowed (as keyword arguments) for compatibility with
HappyBase. However, they will not be used in any way, and will cause a
warning if passed. (The ``batch_size`` determines the number of
results to retrieve per request. The HBase scanner defaults to reading
one record at a time, so this argument allows HappyBase to increase
that number. However, the Cloud Bigtable API uses HTTP/2 streaming so
there is no concept of a batched scan. The ``sorted_columns`` flag
tells HBase to return columns in order, but Cloud Bigtable doesn't
have this feature.)
:type row_start: str
:param row_start: (Optional) Row key where the scanner should start
(includes ``row_start``). If not specified, reads
from the first key. If the table does not contain
``row_start``, it will start from the next key after
it that **is** contained in the table.
:type row_stop: str
:param row_stop: (Optional) Row key where the scanner should stop
(excludes ``row_stop``). If not specified, reads
until the last key. The table does not have to contain
``row_stop``.
:type row_prefix: str
:param row_prefix: (Optional) Prefix to match row keys.
:type columns: list
:param columns: (Optional) Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch). If specified, only cells returned before (or
at) the timestamp will be returned.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:type limit: int
:param limit: (Optional) Maximum number of rows to return.
:type kwargs: dict
:param kwargs: Remaining keyword arguments. Provided for HappyBase
compatibility.
:raises: :class:`ValueError <exceptions.ValueError>` if ``limit`` is set
but not positive, or if ``row_prefix`` is used together with
``row_start`` / ``row_stop``;
:class:`TypeError <exceptions.TypeError>` if a string
``filter`` is used.
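A usage sketch (the prefix, columns and limit are illustrative only)::

    for row_key, row_dict in table.scan(row_prefix=b'user-',
                                        columns=['fam:col'],
                                        limit=10):
        print(row_key, row_dict)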
"""
row_start, row_stop, filter_chain = _scan_filter_helper(
row_start, row_stop, row_prefix, columns, timestamp, limit, kwargs)
partial_rows_data = self._low_level_table.read_rows(
start_key=row_start, end_key=row_stop,
limit=limit, filter_=filter_chain)
# Mutable copy of data.
rows_dict = partial_rows_data.rows
while True:
try:
partial_rows_data.consume_next()
for row_key in sorted(rows_dict):
curr_row_data = rows_dict.pop(row_key)
# NOTE: We expect len(rows_dict) == 0, but don't check it.
curr_row_dict = _partial_row_to_dict(
curr_row_data, include_timestamp=include_timestamp)
yield (row_key, curr_row_dict)
except StopIteration:
break
def put(self, row, data, timestamp=None, wal=_WAL_SENTINEL):
"""Insert data into a row in this table.
.. note::
This method will send a request with a single "put" mutation.
In many situations, :meth:`batch` is a more appropriate
method to manipulate data since it helps combine many mutations
into a single request.
:type row: str
:param row: The row key where the mutation will be "put".
:type data: dict
:param data: Dictionary containing the data to be inserted. The keys
are columns names (of the form ``fam:col``) and the values
are strings (bytes) to be stored in those columns.
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch) that the mutation will be applied at.
:type wal: object
:param wal: Unused parameter (to be passed to a created batch).
Provided for compatibility with HappyBase, but irrelevant
for Cloud Bigtable since it does not have a Write Ahead
Log.
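A usage sketch (row key, columns and values are illustrative only)::

    table.put(b'row-key', {'fam:col1': b'value1',
                           'fam:col2': b'value2'})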
"""
with self.batch(timestamp=timestamp, wal=wal) as batch:
batch.put(row, data)
def delete(self, row, columns=None, timestamp=None, wal=_WAL_SENTINEL):
"""Delete data from a row in this table.
This method deletes the entire ``row`` if ``columns`` is not
specified.
.. note::
This method will send a request with a single delete mutation.
In many situations, :meth:`batch` is a more appropriate
method to manipulate data since it helps combine many mutations
into a single request.
:type row: str
:param row: The row key where the delete will occur.
:type columns: list
:param columns: (Optional) Iterable containing column names (as
strings). Each column name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch) that the mutation will be applied at.
:type wal: object
:param wal: Unused parameter (to be passed to a created batch).
Provided for compatibility with HappyBase, but irrelevant
for Cloud Bigtable since it does not have a Write Ahead
Log.
"""
with self.batch(timestamp=timestamp, wal=wal) as batch:
batch.delete(row, columns)
def batch(self, timestamp=None, batch_size=None, transaction=False,
wal=_WAL_SENTINEL):
"""Create a new batch operation for this table.
This method returns a new
:class:`Batch <.happybase.batch.Batch>` instance that can be
used for mass data manipulation.
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch) that all mutations will be applied at.
:type batch_size: int
:param batch_size: (Optional) The maximum number of mutations to allow
to accumulate before committing them.
:type transaction: bool
:param transaction: Flag indicating if the mutations should be sent
transactionally or not. If ``transaction=True`` and
an error occurs while a
:class:`Batch <.happybase.batch.Batch>` is
active, then none of the accumulated mutations will
be committed. If ``batch_size`` is set, the
mutation can't be transactional.
:type wal: object
:param wal: Unused parameter (to be passed to the created batch).
Provided for compatibility with HappyBase, but irrelevant
for Cloud Bigtable since it does not have a Write Ahead
Log.
:rtype: :class:`Batch <gcloud.bigtable.happybase.batch.Batch>`
:returns: A batch bound to this table.
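A usage sketch (row keys, columns and values are illustrative only)::

    with table.batch(batch_size=20) as batch:
        batch.put(b'row-key1', {'fam:col': b'value1'})
        batch.delete(b'row-key2', columns=['fam:col'])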
"""
return Batch(self, timestamp=timestamp, batch_size=batch_size,
transaction=transaction, wal=wal)
def counter_get(self, row, column):
"""Retrieve the current value of a counter column.
This method retrieves the current value of a counter column. If the
counter column does not exist, this function initializes it to ``0``.
.. note::
Application code should **never** store a counter value directly;
use the atomic :meth:`counter_inc` and :meth:`counter_dec` methods
for that.
:type row: str
:param row: Row key for the row we are getting a counter from.
:type column: str
:param column: Column we are ``get``-ing from; of the form ``fam:col``.
:rtype: int
:returns: Counter value (after initializing / incrementing by 0).
"""
# Don't query directly, but increment with value=0 so that the counter
# is correctly initialized if it didn't exist yet.
return self.counter_inc(row, column, value=0)
def counter_set(self, row, column, value=0):
"""Set a counter column to a specific value.
This method is provided in HappyBase, but we do not provide it here
because it defeats the purpose of using atomic increment and decrement
of a counter.
:type row: str
:param row: Row key for the row we are setting a counter in.
:type column: str
:param column: Column we are setting a value in; of
the form ``fam:col``.
:type value: int
:param value: Value to set the counter to.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always
"""
raise NotImplementedError('Table.counter_set will not be implemented. '
'Instead use the increment/decrement '
'methods along with counter_get.')
def counter_inc(self, row, column, value=1):
"""Atomically increment a counter column.
This method atomically increments a counter column in ``row``.
If the counter column does not exist, it is automatically initialized
to ``0`` before being incremented.
:type row: str
:param row: Row key for the row we are incrementing a counter in.
:type column: str
:param column: Column we are incrementing a value in; of the
form ``fam:col``.
:type value: int
:param value: Amount to increment the counter by. (If negative,
this is equivalent to decrement.)
:rtype: int
:returns: Counter value after incrementing.
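A usage sketch (row key and column are illustrative only)::

    new_value = table.counter_inc(b'row-key', 'fam:visits')
    # increments fam:visits by 1 and returns the new integer value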
"""
row = self._low_level_table.row(row, append=True)
if isinstance(column, six.binary_type):
column = column.decode('utf-8')
column_family_id, column_qualifier = column.split(':')
row.increment_cell_value(column_family_id, column_qualifier, value)
# AppendRow.commit() returns a dictionary of the form:
# {
# u'col-fam-id': {
# b'col-name1': [
# (b'cell-val', datetime.datetime(...)),
# ...
# ],
# ...
# },
# }
modified_cells = row.commit()
# Get the cells in the modified column.
column_cells = modified_cells[column_family_id][column_qualifier]
# Make sure there is exactly one cell in the column.
if len(column_cells) != 1:
raise ValueError('Expected server to return one modified cell.')
column_cell = column_cells[0]
# Get the bytes value from the column and convert it to an integer.
bytes_value = column_cell[0]
int_value, = _UNPACK_I64(bytes_value)
return int_value
def counter_dec(self, row, column, value=1):
"""Atomically decrement a counter column.
This method atomically decrements a counter column in ``row``.
If the counter column does not exist, it is automatically initialized
to ``0`` before being decremented.
:type row: str
:param row: Row key for the row we are decrementing a counter in.
:type column: str
:param column: Column we are decrementing a value in; of the
form ``fam:col``.
:type value: int
:param value: Amount to decrement the counter by. (If negative,
this is equivalent to increment.)
:rtype: int
:returns: Counter value after decrementing.
"""
return self.counter_inc(row, column, -value)
def _gc_rule_to_dict(gc_rule):
"""Converts garbage collection rule to dictionary if possible.
This is in place to support dictionary values as was done
in HappyBase, which has somewhat different garbage collection rule
settings for column families.
Only does this if the garbage collection rule is:
* :class:`gcloud.bigtable.column_family.MaxAgeGCRule`
* :class:`gcloud.bigtable.column_family.MaxVersionsGCRule`
* Composite :class:`gcloud.bigtable.column_family.GCRuleIntersection`
with two rules, one each of type
:class:`gcloud.bigtable.column_family.MaxAgeGCRule` and
:class:`gcloud.bigtable.column_family.MaxVersionsGCRule`
Otherwise, just returns the input without change.
:type gc_rule: :data:`NoneType <types.NoneType>`,
:class:`.GarbageCollectionRule`
:param gc_rule: A garbage collection rule to convert to a dictionary
(if possible).
:rtype: dict or
:class:`gcloud.bigtable.column_family.GarbageCollectionRule`
:returns: The converted garbage collection rule.
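For example (a sketch, not a doctest)::

    _gc_rule_to_dict(None)
    # -> {}
    _gc_rule_to_dict(MaxVersionsGCRule(2))
    # -> {'max_versions': 2}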
"""
result = gc_rule
if gc_rule is None:
result = {}
elif isinstance(gc_rule, MaxAgeGCRule):
result = {'time_to_live': _total_seconds(gc_rule.max_age)}
elif isinstance(gc_rule, MaxVersionsGCRule):
result = {'max_versions': gc_rule.max_num_versions}
elif isinstance(gc_rule, GCRuleIntersection):
if len(gc_rule.rules) == 2:
rule1, rule2 = gc_rule.rules
if (isinstance(rule1, _SIMPLE_GC_RULES) and
isinstance(rule2, _SIMPLE_GC_RULES)):
rule1 = _gc_rule_to_dict(rule1)
rule2 = _gc_rule_to_dict(rule2)
key1, = rule1.keys()
key2, = rule2.keys()
if key1 != key2:
result = {key1: rule1[key1], key2: rule2[key2]}
return result
def _next_char(str_val, index):
"""Gets the next character based on a position in a string.
:type str_val: str
:param str_val: A string containing the character to update.
:type index: int
:param index: An integer index in ``str_val``.
:rtype: str
:returns: The next character after the character at ``index``
in ``str_val``.
"""
ord_val = six.indexbytes(str_val, index)
return _to_bytes(chr(ord_val + 1), encoding='latin-1')
def _string_successor(str_val):
"""Increment and truncate a byte string.
Determines shortest string that sorts after the given string when
compared using regular string comparison semantics.
Modeled after implementation in ``gcloud-golang``.
Increments the last byte that is smaller than ``0xFF``, and
drops everything after it. If the string only contains ``0xFF`` bytes,
``''`` is returned.
:type str_val: str
:param str_val: String to increment.
:rtype: str
:returns: The next string in lexical order after ``str_val``.
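For example (a sketch, not a doctest)::

    _string_successor(b'abc')       # -> b'abd'
    _string_successor(b'ab\xff')    # -> b'ac' (trailing 0xFF byte dropped)
    _string_successor(b'\xff\xff')  # -> b'' (input is all 0xFF bytes)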
"""
str_val = _to_bytes(str_val, encoding='latin-1')
if str_val == b'':
return str_val
index = len(str_val) - 1
while index >= 0:
if six.indexbytes(str_val, index) != 0xff:
break
index -= 1
if index == -1:
return b''
return str_val[:index] + _next_char(str_val, index)
def _convert_to_time_range(timestamp=None):
"""Create a timestamp range from an HBase / HappyBase timestamp.
HBase uses timestamp as an argument to specify an exclusive end
deadline. Cloud Bigtable also uses exclusive end times, so
the behavior matches.
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch). Intended to be used as the end of an HBase
time range, which is exclusive.
:rtype: :class:`gcloud.bigtable.row.TimestampRange`,
:data:`NoneType <types.NoneType>`
:returns: The timestamp range corresponding to the passed in
``timestamp``.
"""
if timestamp is None:
return None
next_timestamp = _datetime_from_microseconds(1000 * timestamp)
return TimestampRange(end=next_timestamp)
def _cells_to_pairs(cells, include_timestamp=False):
"""Converts list of cells to HappyBase format.
For example::
>>> import datetime
>>> from gcloud.bigtable.row_data import Cell
>>> cell1 = Cell(b'val1', datetime.datetime.utcnow())
>>> cell2 = Cell(b'val2', datetime.datetime.utcnow())
>>> _cells_to_pairs([cell1, cell2])
[b'val1', b'val2']
>>> _cells_to_pairs([cell1, cell2], include_timestamp=True)
[(b'val1', 1456361486255), (b'val2', 1456361491927)]
:type cells: list
:param cells: List of :class:`gcloud.bigtable.row_data.Cell` returned
from a read request.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:rtype: list
:returns: List of values in the cell. If ``include_timestamp=True``, each
value will be a pair, with the first part the bytes value in
the cell and the second part the number of milliseconds in the
timestamp on the cell.
"""
result = []
for cell in cells:
if include_timestamp:
ts_millis = _microseconds_from_datetime(cell.timestamp) // 1000
result.append((cell.value, ts_millis))
else:
result.append(cell.value)
return result
def _partial_row_to_dict(partial_row_data, include_timestamp=False):
"""Convert a low-level row data object to a dictionary.
Assumes only the latest value in each column is needed. This assumption
is due to the fact that this method is used by callers which use
a ``CellsColumnLimitFilter(1)`` filter.
For example::
>>> import datetime
>>> from gcloud.bigtable.row_data import Cell, PartialRowData
>>> cell1 = Cell(b'val1', datetime.datetime.utcnow())
>>> cell2 = Cell(b'val2', datetime.datetime.utcnow())
>>> row_data = PartialRowData(b'row-key')
>>> _partial_row_to_dict(row_data)
{}
>>> row_data._cells[u'fam1'] = {b'col1': [cell1], b'col2': [cell2]}
>>> _partial_row_to_dict(row_data)
{b'fam1:col2': b'val2', b'fam1:col1': b'val1'}
>>> _partial_row_to_dict(row_data, include_timestamp=True)
{b'fam1:col2': (b'val2', 1456361724480),
b'fam1:col1': (b'val1', 1456361721135)}
:type partial_row_data: :class:`.row_data.PartialRowData`
:param partial_row_data: Row data consumed from a stream.
:type include_timestamp: bool
:param include_timestamp: Flag to indicate if cell timestamps should be
included with the output.
:rtype: dict
:returns: The row data converted to a dictionary.
"""
result = {}
for column, cells in six.iteritems(partial_row_data.to_dict()):
cell_vals = _cells_to_pairs(cells,
include_timestamp=include_timestamp)
# NOTE: We assume there is exactly 1 version since we used that in
# our filter, but we don't check this.
result[column] = cell_vals[0]
return result
def _filter_chain_helper(column=None, versions=None, timestamp=None,
filters=None):
"""Create filter chain to limit a results set.
:type column: str
:param column: (Optional) The column (``fam:col``) to be selected
with the filter.
:type versions: int
:param versions: (Optional) The maximum number of cells to return.
:type timestamp: int
:param timestamp: (Optional) Timestamp (in milliseconds since the
epoch). If specified, only cells returned before (or
at) the timestamp will be matched.
:type filters: list
:param filters: (Optional) List of existing filters to be extended.
:rtype: :class:`RowFilter <gcloud.bigtable.row.RowFilter>`
:returns: The chained filter created, or just a single filter if only
one was needed.
:raises: :class:`ValueError <exceptions.ValueError>` if there are no
filters to chain.
"""
if filters is None:
filters = []
if column is not None:
if isinstance(column, six.binary_type):
column = column.decode('utf-8')
column_family_id, column_qualifier = column.split(':')
fam_filter = FamilyNameRegexFilter(column_family_id)
qual_filter = ColumnQualifierRegexFilter(column_qualifier)
filters.extend([fam_filter, qual_filter])
if versions is not None:
filters.append(CellsColumnLimitFilter(versions))
time_range = _convert_to_time_range(timestamp=timestamp)
if time_range is not None:
filters.append(TimestampRangeFilter(time_range))
num_filters = len(filters)
if num_filters == 0:
raise ValueError('Must have at least one filter.')
elif num_filters == 1:
return filters[0]
else:
return RowFilterChain(filters=filters)
def _scan_filter_helper(row_start, row_stop, row_prefix, columns,
timestamp, limit, kwargs):
"""Helper for :meth:`scan`: build up a filter chain."""
filter_ = kwargs.pop('filter', None)
legacy_args = []
for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'):
if kw_name in kwargs:
legacy_args.append(kw_name)
kwargs.pop(kw_name)
if legacy_args:
legacy_args = ', '.join(legacy_args)
message = ('The HappyBase legacy arguments %s were used. These '
'arguments are unused by gcloud.' % (legacy_args,))
_WARN(message)
if kwargs:
raise TypeError('Received unexpected arguments', kwargs.keys())
if limit is not None and limit < 1:
raise ValueError('limit must be positive')
if row_prefix is not None:
if row_start is not None or row_stop is not None:
raise ValueError('row_prefix cannot be combined with '
'row_start or row_stop')
row_start = row_prefix
row_stop = _string_successor(row_prefix)
filters = []
if isinstance(filter_, six.string_types):
raise TypeError('Specifying filters as a string is not supported '
'by Cloud Bigtable. Use a '
'gcloud.bigtable.row.RowFilter instead.')
elif filter_ is not None:
filters.append(filter_)
if columns is not None:
filters.append(_columns_filter_helper(columns))
# versions == 1 since we only want the latest.
filter_ = _filter_chain_helper(versions=1, timestamp=timestamp,
filters=filters)
return row_start, row_stop, filter_
def _columns_filter_helper(columns):
"""Creates a union filter for a list of columns.
:type columns: list
:param columns: Iterable containing column names (as strings). Each column
name can be either
* an entire column family: ``fam`` or ``fam:``
* a single column: ``fam:col``
:rtype: :class:`RowFilter <gcloud.bigtable.row.RowFilter>`
:returns: The union filter created containing all of the matched columns.
:raises: :class:`ValueError <exceptions.ValueError>` if there are no
filters to union.
"""
filters = []
for column_family_id, column_qualifier in _get_column_pairs(columns):
fam_filter = FamilyNameRegexFilter(column_family_id)
if column_qualifier is not None:
qual_filter = ColumnQualifierRegexFilter(column_qualifier)
combined_filter = RowFilterChain(
filters=[fam_filter, qual_filter])
filters.append(combined_filter)
else:
filters.append(fam_filter)
num_filters = len(filters)
if num_filters == 0:
raise ValueError('Must have at least one filter.')
elif num_filters == 1:
return filters[0]
else:
return RowFilterUnion(filters=filters)
def _row_keys_filter_helper(row_keys):
"""Creates a union filter for a list of rows.
:type row_keys: list
:param row_keys: Iterable containing row keys (as strings).
:rtype: :class:`RowFilter <gcloud.bigtable.row.RowFilter>`
:returns: The union filter created containing all of the row keys.
:raises: :class:`ValueError <exceptions.ValueError>` if there are no
filters to union.
"""
filters = []
for row_key in row_keys:
filters.append(RowKeyRegexFilter(row_key))
num_filters = len(filters)
if num_filters == 0:
raise ValueError('Must have at least one filter.')
elif num_filters == 1:
return filters[0]
else:
return RowFilterUnion(filters=filters)

View file

@ -0,0 +1,568 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class _SendMixin(object):
_send_called = False
def send(self):
self._send_called = True
class TestBatch(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.happybase.batch import Batch
return Batch
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
table = object()
batch = self._makeOne(table)
self.assertEqual(batch._table, table)
self.assertEqual(batch._batch_size, None)
self.assertEqual(batch._timestamp, None)
self.assertEqual(batch._delete_range, None)
self.assertEqual(batch._transaction, False)
self.assertEqual(batch._row_map, {})
self.assertEqual(batch._mutation_count, 0)
def test_constructor_explicit(self):
from gcloud._helpers import _datetime_from_microseconds
from gcloud.bigtable.row_filters import TimestampRange
table = object()
timestamp = 144185290431
batch_size = 42
transaction = False # Must be False when batch_size is non-null
batch = self._makeOne(table, timestamp=timestamp,
batch_size=batch_size, transaction=transaction)
self.assertEqual(batch._table, table)
self.assertEqual(batch._batch_size, batch_size)
self.assertEqual(batch._timestamp,
_datetime_from_microseconds(1000 * timestamp))
next_timestamp = _datetime_from_microseconds(1000 * (timestamp + 1))
time_range = TimestampRange(end=next_timestamp)
self.assertEqual(batch._delete_range, time_range)
self.assertEqual(batch._transaction, transaction)
self.assertEqual(batch._row_map, {})
self.assertEqual(batch._mutation_count, 0)
def test_constructor_with_non_default_wal(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import batch as MUT
warned = []
def mock_warn(msg):
warned.append(msg)
table = object()
wal = object()
with _Monkey(MUT, _WARN=mock_warn):
self._makeOne(table, wal=wal)
self.assertEqual(warned, [MUT._WAL_WARNING])
def test_constructor_with_non_positive_batch_size(self):
table = object()
batch_size = -10
with self.assertRaises(ValueError):
self._makeOne(table, batch_size=batch_size)
batch_size = 0
with self.assertRaises(ValueError):
self._makeOne(table, batch_size=batch_size)
def test_constructor_with_batch_size_and_transactional(self):
table = object()
batch_size = 1
transaction = True
with self.assertRaises(TypeError):
self._makeOne(table, batch_size=batch_size,
transaction=transaction)
def test_send(self):
table = object()
batch = self._makeOne(table)
batch._row_map = row_map = _MockRowMap()
row_map['row-key1'] = row1 = _MockRow()
row_map['row-key2'] = row2 = _MockRow()
batch._mutation_count = 1337
self.assertEqual(row_map.clear_count, 0)
self.assertEqual(row1.commits, 0)
self.assertEqual(row2.commits, 0)
self.assertNotEqual(batch._mutation_count, 0)
self.assertNotEqual(row_map, {})
batch.send()
self.assertEqual(row_map.clear_count, 1)
self.assertEqual(row1.commits, 1)
self.assertEqual(row2.commits, 1)
self.assertEqual(batch._mutation_count, 0)
self.assertEqual(row_map, {})
def test__try_send_no_batch_size(self):
klass = self._getTargetClass()
class BatchWithSend(_SendMixin, klass):
pass
table = object()
batch = BatchWithSend(table)
self.assertEqual(batch._batch_size, None)
self.assertFalse(batch._send_called)
batch._try_send()
self.assertFalse(batch._send_called)
def test__try_send_too_few_mutations(self):
klass = self._getTargetClass()
class BatchWithSend(_SendMixin, klass):
pass
table = object()
batch_size = 10
batch = BatchWithSend(table, batch_size=batch_size)
self.assertEqual(batch._batch_size, batch_size)
self.assertFalse(batch._send_called)
mutation_count = 2
batch._mutation_count = mutation_count
self.assertTrue(mutation_count < batch_size)
batch._try_send()
self.assertFalse(batch._send_called)
def test__try_send_actual_send(self):
klass = self._getTargetClass()
class BatchWithSend(_SendMixin, klass):
pass
table = object()
batch_size = 10
batch = BatchWithSend(table, batch_size=batch_size)
self.assertEqual(batch._batch_size, batch_size)
self.assertFalse(batch._send_called)
mutation_count = 12
batch._mutation_count = mutation_count
self.assertTrue(mutation_count > batch_size)
batch._try_send()
self.assertTrue(batch._send_called)
def test__get_row_exists(self):
table = object()
batch = self._makeOne(table)
row_key = 'row-key'
row_obj = object()
batch._row_map[row_key] = row_obj
result = batch._get_row(row_key)
self.assertEqual(result, row_obj)
def test__get_row_create_new(self):
# Make mock batch and make sure we can create a low-level table.
low_level_table = _MockLowLevelTable()
table = _MockTable(low_level_table)
batch = self._makeOne(table)
# Make sure row map is empty.
self.assertEqual(batch._row_map, {})
# Customize/capture mock table creation.
low_level_table.mock_row = mock_row = object()
# Actually get the row (which creates a row via a low-level table).
row_key = 'row-key'
result = batch._get_row(row_key)
self.assertEqual(result, mock_row)
# Check all the things that were constructed.
self.assertEqual(low_level_table.rows_made, [row_key])
# Check how the batch was updated.
self.assertEqual(batch._row_map, {row_key: mock_row})
def test_put_bad_wal(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import batch as MUT
warned = []
def mock_warn(message):
warned.append(message)
# Raise an exception so we don't have to mock the entire
# environment needed for put().
raise RuntimeError('No need to execute the rest.')
table = object()
batch = self._makeOne(table)
row = 'row-key'
data = {}
wal = None
self.assertNotEqual(wal, MUT._WAL_SENTINEL)
with _Monkey(MUT, _WARN=mock_warn):
with self.assertRaises(RuntimeError):
batch.put(row, data, wal=wal)
self.assertEqual(warned, [MUT._WAL_WARNING])
def test_put(self):
import operator
table = object()
batch = self._makeOne(table)
batch._timestamp = timestamp = object()
row_key = 'row-key'
batch._row_map[row_key] = row = _MockRow()
col1_fam = 'cf1'
col1_qual = 'qual1'
value1 = 'value1'
col2_fam = 'cf2'
col2_qual = 'qual2'
value2 = 'value2'
data = {col1_fam + ':' + col1_qual: value1,
col2_fam + ':' + col2_qual: value2}
self.assertEqual(batch._mutation_count, 0)
self.assertEqual(row.set_cell_calls, [])
batch.put(row_key, data)
self.assertEqual(batch._mutation_count, 2)
# Since the calls depend on data.keys(), the order
# is non-deterministic.
first_elt = operator.itemgetter(0)
ordered_calls = sorted(row.set_cell_calls, key=first_elt)
cell1_args = (col1_fam, col1_qual, value1)
cell1_kwargs = {'timestamp': timestamp}
cell2_args = (col2_fam, col2_qual, value2)
cell2_kwargs = {'timestamp': timestamp}
self.assertEqual(ordered_calls, [
(cell1_args, cell1_kwargs),
(cell2_args, cell2_kwargs),
])
def test_put_call_try_send(self):
klass = self._getTargetClass()
class CallTrySend(klass):
try_send_calls = 0
def _try_send(self):
self.try_send_calls += 1
table = object()
batch = CallTrySend(table)
row_key = 'row-key'
batch._row_map[row_key] = _MockRow()
self.assertEqual(batch._mutation_count, 0)
self.assertEqual(batch.try_send_calls, 0)
# No data so that nothing happens
batch.put(row_key, data={})
self.assertEqual(batch._mutation_count, 0)
self.assertEqual(batch.try_send_calls, 1)
def _delete_columns_test_helper(self, time_range=None):
table = object()
batch = self._makeOne(table)
batch._delete_range = time_range
col1_fam = 'cf1'
col2_fam = 'cf2'
col2_qual = 'col-name'
columns = [col1_fam + ':', col2_fam + ':' + col2_qual]
row_object = _MockRow()
batch._delete_columns(columns, row_object)
self.assertEqual(row_object.commits, 0)
cell_deleted_args = (col2_fam, col2_qual)
cell_deleted_kwargs = {'time_range': time_range}
self.assertEqual(row_object.delete_cell_calls,
[(cell_deleted_args, cell_deleted_kwargs)])
fam_deleted_args = (col1_fam,)
fam_deleted_kwargs = {'columns': row_object.ALL_COLUMNS}
self.assertEqual(row_object.delete_cells_calls,
[(fam_deleted_args, fam_deleted_kwargs)])
def test__delete_columns(self):
self._delete_columns_test_helper()
def test__delete_columns_w_time_and_col_fam(self):
time_range = object()
with self.assertRaises(ValueError):
self._delete_columns_test_helper(time_range=time_range)
def test_delete_bad_wal(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import batch as MUT
warned = []
def mock_warn(message):
warned.append(message)
# Raise an exception so we don't have to mock the entire
# environment needed for delete().
raise RuntimeError('No need to execute the rest.')
table = object()
batch = self._makeOne(table)
row = 'row-key'
columns = []
wal = None
self.assertNotEqual(wal, MUT._WAL_SENTINEL)
with _Monkey(MUT, _WARN=mock_warn):
with self.assertRaises(RuntimeError):
batch.delete(row, columns=columns, wal=wal)
self.assertEqual(warned, [MUT._WAL_WARNING])
def test_delete_entire_row(self):
table = object()
batch = self._makeOne(table)
row_key = 'row-key'
batch._row_map[row_key] = row = _MockRow()
self.assertEqual(row.deletes, 0)
self.assertEqual(batch._mutation_count, 0)
batch.delete(row_key, columns=None)
self.assertEqual(row.deletes, 1)
self.assertEqual(batch._mutation_count, 1)
def test_delete_entire_row_with_ts(self):
table = object()
batch = self._makeOne(table)
batch._delete_range = object()
row_key = 'row-key'
batch._row_map[row_key] = row = _MockRow()
self.assertEqual(row.deletes, 0)
self.assertEqual(batch._mutation_count, 0)
with self.assertRaises(ValueError):
batch.delete(row_key, columns=None)
self.assertEqual(row.deletes, 0)
self.assertEqual(batch._mutation_count, 0)
def test_delete_call_try_send(self):
klass = self._getTargetClass()
class CallTrySend(klass):
try_send_calls = 0
def _try_send(self):
self.try_send_calls += 1
table = object()
batch = CallTrySend(table)
row_key = 'row-key'
batch._row_map[row_key] = _MockRow()
self.assertEqual(batch._mutation_count, 0)
self.assertEqual(batch.try_send_calls, 0)
# No columns so that nothing happens
batch.delete(row_key, columns=[])
self.assertEqual(batch._mutation_count, 0)
self.assertEqual(batch.try_send_calls, 1)
def test_delete_some_columns(self):
table = object()
batch = self._makeOne(table)
row_key = 'row-key'
batch._row_map[row_key] = row = _MockRow()
self.assertEqual(batch._mutation_count, 0)
col1_fam = 'cf1'
col2_fam = 'cf2'
col2_qual = 'col-name'
columns = [col1_fam + ':', col2_fam + ':' + col2_qual]
batch.delete(row_key, columns=columns)
self.assertEqual(batch._mutation_count, 2)
cell_deleted_args = (col2_fam, col2_qual)
cell_deleted_kwargs = {'time_range': None}
self.assertEqual(row.delete_cell_calls,
[(cell_deleted_args, cell_deleted_kwargs)])
fam_deleted_args = (col1_fam,)
fam_deleted_kwargs = {'columns': row.ALL_COLUMNS}
self.assertEqual(row.delete_cells_calls,
[(fam_deleted_args, fam_deleted_kwargs)])
def test_context_manager(self):
klass = self._getTargetClass()
class BatchWithSend(_SendMixin, klass):
pass
table = object()
batch = BatchWithSend(table)
self.assertFalse(batch._send_called)
with batch:
pass
self.assertTrue(batch._send_called)
def test_context_manager_with_exception_non_transactional(self):
klass = self._getTargetClass()
class BatchWithSend(_SendMixin, klass):
pass
table = object()
batch = BatchWithSend(table)
self.assertFalse(batch._send_called)
with self.assertRaises(ValueError):
with batch:
raise ValueError('Something bad happened')
self.assertTrue(batch._send_called)
def test_context_manager_with_exception_transactional(self):
klass = self._getTargetClass()
class BatchWithSend(_SendMixin, klass):
pass
table = object()
batch = BatchWithSend(table, transaction=True)
self.assertFalse(batch._send_called)
with self.assertRaises(ValueError):
with batch:
raise ValueError('Something bad happened')
self.assertFalse(batch._send_called)
# Just to make sure send() actually works (and to make cover happy).
batch.send()
self.assertTrue(batch._send_called)
class Test__get_column_pairs(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.happybase.batch import _get_column_pairs
return _get_column_pairs(*args, **kwargs)
def test_it(self):
columns = [b'cf1', u'cf2:', 'cf3::', 'cf3:name1', 'cf3:name2']
result = self._callFUT(columns)
expected_result = [
['cf1', None],
['cf2', None],
['cf3', ''],
['cf3', 'name1'],
['cf3', 'name2'],
]
self.assertEqual(result, expected_result)
def test_bad_column(self):
columns = ['a:b:c']
with self.assertRaises(ValueError):
self._callFUT(columns)
def test_bad_column_type(self):
columns = [None]
with self.assertRaises(AttributeError):
self._callFUT(columns)
def test_bad_columns_var(self):
columns = None
with self.assertRaises(TypeError):
self._callFUT(columns)
def test_column_family_with_require_qualifier(self):
columns = ['a:']
with self.assertRaises(ValueError):
self._callFUT(columns, require_qualifier=True)
class _MockRowMap(dict):
clear_count = 0
def clear(self):
self.clear_count += 1
super(_MockRowMap, self).clear()
class _MockRow(object):
ALL_COLUMNS = object()
def __init__(self):
self.commits = 0
self.deletes = 0
self.set_cell_calls = []
self.delete_cell_calls = []
self.delete_cells_calls = []
def commit(self):
self.commits += 1
def delete(self):
self.deletes += 1
def set_cell(self, *args, **kwargs):
self.set_cell_calls.append((args, kwargs))
def delete_cell(self, *args, **kwargs):
self.delete_cell_calls.append((args, kwargs))
def delete_cells(self, *args, **kwargs):
self.delete_cells_calls.append((args, kwargs))
class _MockTable(object):
def __init__(self, low_level_table):
self._low_level_table = low_level_table
class _MockLowLevelTable(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.rows_made = []
self.mock_row = None
def row(self, row_key):
self.rows_made.append(row_key)
return self.mock_row

View file

@ -0,0 +1,682 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest2
class Test__get_instance(unittest2.TestCase):
def _callFUT(self, timeout=None):
from gcloud.bigtable.happybase.connection import _get_instance
return _get_instance(timeout=timeout)
def _helper(self, timeout=None, instances=(), failed_locations=()):
from functools import partial
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
client_with_instances = partial(
_Client, instances=instances, failed_locations=failed_locations)
with _Monkey(MUT, Client=client_with_instances):
result = self._callFUT(timeout=timeout)
# If we've reached this point, then _callFUT didn't fail, so we know
# there is exactly one instance.
instance, = instances
self.assertEqual(result, instance)
client = instance.client
self.assertEqual(client.args, ())
expected_kwargs = {'admin': True}
if timeout is not None:
expected_kwargs['timeout_seconds'] = timeout / 1000.0
self.assertEqual(client.kwargs, expected_kwargs)
self.assertEqual(client.start_calls, 1)
self.assertEqual(client.stop_calls, 1)
def test_default(self):
instance = _Instance()
self._helper(instances=[instance])
def test_with_timeout(self):
instance = _Instance()
self._helper(timeout=2103, instances=[instance])
def test_with_no_instances(self):
with self.assertRaises(ValueError):
self._helper()
def test_with_too_many_instances(self):
instances = [_Instance(), _Instance()]
with self.assertRaises(ValueError):
self._helper(instances=instances)
def test_with_failed_locations(self):
instance = _Instance()
failed_location = 'us-central1-c'
with self.assertRaises(ValueError):
self._helper(instances=[instance],
failed_locations=[failed_location])
class TestConnection(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.happybase.connection import Connection
return Connection
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
instance = _Instance() # Avoid implicit environ check.
self.assertEqual(instance._client.start_calls, 0)
connection = self._makeOne(instance=instance)
self.assertEqual(instance._client.start_calls, 1)
self.assertEqual(instance._client.stop_calls, 0)
self.assertEqual(connection._instance, instance)
self.assertEqual(connection.table_prefix, None)
self.assertEqual(connection.table_prefix_separator, '_')
def test_constructor_no_autoconnect(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
self.assertEqual(instance._client.start_calls, 0)
self.assertEqual(instance._client.stop_calls, 0)
self.assertEqual(connection.table_prefix, None)
self.assertEqual(connection.table_prefix_separator, '_')
def test_constructor_missing_instance(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
instance = _Instance()
timeout = object()
get_instance_called = []
def mock_get_instance(timeout):
get_instance_called.append(timeout)
return instance
with _Monkey(MUT, _get_instance=mock_get_instance):
connection = self._makeOne(autoconnect=False, instance=None,
timeout=timeout)
self.assertEqual(connection.table_prefix, None)
self.assertEqual(connection.table_prefix_separator, '_')
self.assertEqual(connection._instance, instance)
self.assertEqual(get_instance_called, [timeout])
def test_constructor_explicit(self):
autoconnect = False
table_prefix = 'table-prefix'
table_prefix_separator = 'sep'
instance_copy = _Instance()
instance = _Instance(copies=[instance_copy])
connection = self._makeOne(
autoconnect=autoconnect,
table_prefix=table_prefix,
table_prefix_separator=table_prefix_separator,
instance=instance)
self.assertEqual(connection.table_prefix, table_prefix)
self.assertEqual(connection.table_prefix_separator,
table_prefix_separator)
def test_constructor_with_unknown_argument(self):
instance = _Instance()
with self.assertRaises(TypeError):
self._makeOne(instance=instance, unknown='foo')
def test_constructor_with_legacy_args(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
warned = []
def mock_warn(msg):
warned.append(msg)
instance = _Instance()
with _Monkey(MUT, _WARN=mock_warn):
self._makeOne(instance=instance, host=object(),
port=object(), compat=object(),
transport=object(), protocol=object())
self.assertEqual(len(warned), 1)
self.assertIn('host', warned[0])
self.assertIn('port', warned[0])
self.assertIn('compat', warned[0])
self.assertIn('transport', warned[0])
self.assertIn('protocol', warned[0])
def test_constructor_with_timeout_and_instance(self):
instance = _Instance()
with self.assertRaises(ValueError):
self._makeOne(instance=instance, timeout=object())
def test_constructor_non_string_prefix(self):
table_prefix = object()
with self.assertRaises(TypeError):
self._makeOne(autoconnect=False,
table_prefix=table_prefix)
def test_constructor_non_string_prefix_separator(self):
table_prefix_separator = object()
with self.assertRaises(TypeError):
self._makeOne(autoconnect=False,
table_prefix_separator=table_prefix_separator)
def test_open(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
self.assertEqual(instance._client.start_calls, 0)
connection.open()
self.assertEqual(instance._client.start_calls, 1)
self.assertEqual(instance._client.stop_calls, 0)
def test_close(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
self.assertEqual(instance._client.stop_calls, 0)
connection.close()
self.assertEqual(instance._client.stop_calls, 1)
self.assertEqual(instance._client.start_calls, 0)
def test___del__with_instance(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
self.assertEqual(instance._client.stop_calls, 0)
connection.__del__()
self.assertEqual(instance._client.stop_calls, 1)
def test___del__no_instance(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
self.assertEqual(instance._client.stop_calls, 0)
del connection._instance
connection.__del__()
self.assertEqual(instance._client.stop_calls, 0)
def test__table_name_with_prefix_set(self):
table_prefix = 'table-prefix'
table_prefix_separator = '<>'
instance = _Instance()
connection = self._makeOne(
autoconnect=False,
table_prefix=table_prefix,
table_prefix_separator=table_prefix_separator,
instance=instance)
name = 'some-name'
prefixed = connection._table_name(name)
self.assertEqual(prefixed,
table_prefix + table_prefix_separator + name)
def test__table_name_with_no_prefix_set(self):
instance = _Instance()
connection = self._makeOne(autoconnect=False,
instance=instance)
name = 'some-name'
prefixed = connection._table_name(name)
self.assertEqual(prefixed, name)
def test_table_factory(self):
from gcloud.bigtable.happybase.table import Table
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
name = 'table-name'
table = connection.table(name)
self.assertTrue(isinstance(table, Table))
self.assertEqual(table.name, name)
self.assertEqual(table.connection, connection)
def _table_factory_prefix_helper(self, use_prefix=True):
from gcloud.bigtable.happybase.table import Table
instance = _Instance() # Avoid implicit environ check.
table_prefix = 'table-prefix'
table_prefix_separator = '<>'
connection = self._makeOne(
autoconnect=False, table_prefix=table_prefix,
table_prefix_separator=table_prefix_separator,
instance=instance)
name = 'table-name'
table = connection.table(name, use_prefix=use_prefix)
self.assertTrue(isinstance(table, Table))
prefixed_name = table_prefix + table_prefix_separator + name
if use_prefix:
self.assertEqual(table.name, prefixed_name)
else:
self.assertEqual(table.name, name)
self.assertEqual(table.connection, connection)
def test_table_factory_with_prefix(self):
self._table_factory_prefix_helper(use_prefix=True)
def test_table_factory_with_ignored_prefix(self):
self._table_factory_prefix_helper(use_prefix=False)
def test_tables(self):
from gcloud.bigtable.table import Table
table_name1 = 'table-name1'
table_name2 = 'table-name2'
instance = _Instance(list_tables_result=[
Table(table_name1, None),
Table(table_name2, None),
])
connection = self._makeOne(autoconnect=False, instance=instance)
result = connection.tables()
self.assertEqual(result, [table_name1, table_name2])
def test_tables_with_prefix(self):
from gcloud.bigtable.table import Table
table_prefix = 'prefix'
table_prefix_separator = '<>'
unprefixed_table_name1 = 'table-name1'
table_name1 = (table_prefix + table_prefix_separator +
unprefixed_table_name1)
table_name2 = 'table-name2'
instance = _Instance(list_tables_result=[
Table(table_name1, None),
Table(table_name2, None),
])
connection = self._makeOne(
autoconnect=False, instance=instance, table_prefix=table_prefix,
table_prefix_separator=table_prefix_separator)
result = connection.tables()
self.assertEqual(result, [unprefixed_table_name1])
def test_create_table(self):
import operator
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
mock_gc_rule = object()
called_options = []
def mock_parse_family_option(option):
called_options.append(option)
return mock_gc_rule
name = 'table-name'
col_fam1 = 'cf1'
col_fam_option1 = object()
col_fam2 = u'cf2'
col_fam_option2 = object()
col_fam3 = b'cf3'
col_fam_option3 = object()
families = {
col_fam1: col_fam_option1,
# A trailing colon is also allowed.
col_fam2 + ':': col_fam_option2,
col_fam3 + b':': col_fam_option3,
}
tables_created = []
def make_table(*args, **kwargs):
result = _MockLowLevelTable(*args, **kwargs)
tables_created.append(result)
return result
with _Monkey(MUT, _LowLevelTable=make_table,
_parse_family_option=mock_parse_family_option):
connection.create_table(name, families)
# Just one table would have been created.
table_instance, = tables_created
self.assertEqual(table_instance.args, (name, instance))
self.assertEqual(table_instance.kwargs, {})
self.assertEqual(table_instance.create_calls, 1)
# Check that our mock was called once per family (three times), but we don't know the order.
self.assertEqual(
set(called_options),
set([col_fam_option1, col_fam_option2, col_fam_option3]))
# We expect three column family instances created, but don't know the
# order due to non-deterministic dict.items().
col_fam_created = table_instance.col_fam_created
self.assertEqual(len(col_fam_created), 3)
col_fam_created.sort(key=operator.attrgetter('column_family_id'))
self.assertEqual(col_fam_created[0].column_family_id, col_fam1)
self.assertEqual(col_fam_created[0].gc_rule, mock_gc_rule)
self.assertEqual(col_fam_created[0].create_calls, 1)
self.assertEqual(col_fam_created[1].column_family_id, col_fam2)
self.assertEqual(col_fam_created[1].gc_rule, mock_gc_rule)
self.assertEqual(col_fam_created[1].create_calls, 1)
self.assertEqual(col_fam_created[2].column_family_id,
col_fam3.decode('utf-8'))
self.assertEqual(col_fam_created[2].gc_rule, mock_gc_rule)
self.assertEqual(col_fam_created[2].create_calls, 1)
def test_create_table_bad_type(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
name = 'table-name'
families = None
with self.assertRaises(TypeError):
connection.create_table(name, families)
def test_create_table_bad_value(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
name = 'table-name'
families = {}
with self.assertRaises(ValueError):
connection.create_table(name, families)
def _create_table_error_helper(self, err_val, err_type):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
tables_created = []
def make_table(*args, **kwargs):
kwargs['create_error'] = err_val
result = _MockLowLevelTable(*args, **kwargs)
tables_created.append(result)
return result
name = 'table-name'
families = {'foo': {}}
with _Monkey(MUT, _LowLevelTable=make_table):
with self.assertRaises(err_type):
connection.create_table(name, families)
self.assertEqual(len(tables_created), 1)
self.assertEqual(tables_created[0].create_calls, 1)
@unittest2.skipUnless(sys.version_info[:2] == (2, 7),
'gRPC only in Python 2.7')
def test_create_table_already_exists(self):
from grpc.beta import interfaces
from grpc.framework.interfaces.face import face
from gcloud.bigtable.happybase.connection import AlreadyExists
err_val = face.NetworkError(None, None,
interfaces.StatusCode.ALREADY_EXISTS, None)
self._create_table_error_helper(err_val, AlreadyExists)
@unittest2.skipUnless(sys.version_info[:2] == (2, 7),
'gRPC only in Python 2.7')
def test_create_table_connection_error(self):
from grpc.beta import interfaces
from grpc.framework.interfaces.face import face
err_val = face.NetworkError(None, None,
interfaces.StatusCode.INTERNAL, None)
self._create_table_error_helper(err_val, face.NetworkError)
@unittest2.skipUnless(sys.version_info[:2] == (2, 7),
'gRPC only in Python 2.7')
def test_create_table_other_error(self):
self._create_table_error_helper(RuntimeError, RuntimeError)
def _delete_table_helper(self, disable=False):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
tables_created = []
def make_table(*args, **kwargs):
result = _MockLowLevelTable(*args, **kwargs)
tables_created.append(result)
return result
name = 'table-name'
with _Monkey(MUT, _LowLevelTable=make_table):
connection.delete_table(name, disable=disable)
# Just one table would have been created.
table_instance, = tables_created
self.assertEqual(table_instance.args, (name, instance))
self.assertEqual(table_instance.kwargs, {})
self.assertEqual(table_instance.delete_calls, 1)
def test_delete_table(self):
self._delete_table_helper()
def test_delete_table_disable(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
warned = []
def mock_warn(msg):
warned.append(msg)
with _Monkey(MUT, _WARN=mock_warn):
self._delete_table_helper(disable=True)
self.assertEqual(warned, [MUT._DISABLE_DELETE_MSG])
def test_enable_table(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
name = 'table-name'
with self.assertRaises(NotImplementedError):
connection.enable_table(name)
def test_disable_table(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
name = 'table-name'
with self.assertRaises(NotImplementedError):
connection.disable_table(name)
def test_is_table_enabled(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
name = 'table-name'
with self.assertRaises(NotImplementedError):
connection.is_table_enabled(name)
def test_compact_table(self):
instance = _Instance() # Avoid implicit environ check.
connection = self._makeOne(autoconnect=False, instance=instance)
name = 'table-name'
major = True
with self.assertRaises(NotImplementedError):
connection.compact_table(name, major=major)
class Test__parse_family_option(unittest2.TestCase):
def _callFUT(self, option):
from gcloud.bigtable.happybase.connection import _parse_family_option
return _parse_family_option(option)
def test_dictionary_no_keys(self):
option = {}
result = self._callFUT(option)
self.assertEqual(result, None)
def test_null(self):
option = None
result = self._callFUT(option)
self.assertEqual(result, None)
def test_dictionary_bad_key(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import connection as MUT
warned = []
def mock_warn(msg):
warned.append(msg)
option = {'badkey': None}
with _Monkey(MUT, _WARN=mock_warn):
result = self._callFUT(option)
self.assertEqual(result, None)
self.assertEqual(len(warned), 1)
self.assertIn('badkey', warned[0])
def test_dictionary_versions_key(self):
from gcloud.bigtable.column_family import MaxVersionsGCRule
versions = 42
option = {'max_versions': versions}
result = self._callFUT(option)
gc_rule = MaxVersionsGCRule(versions)
self.assertEqual(result, gc_rule)
def test_dictionary_ttl_key(self):
import datetime
from gcloud.bigtable.column_family import MaxAgeGCRule
time_to_live = 24 * 60 * 60
max_age = datetime.timedelta(days=1)
option = {'time_to_live': time_to_live}
result = self._callFUT(option)
gc_rule = MaxAgeGCRule(max_age)
self.assertEqual(result, gc_rule)
def test_dictionary_both_keys(self):
import datetime
from gcloud.bigtable.column_family import GCRuleIntersection
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
versions = 42
time_to_live = 24 * 60 * 60
option = {
'max_versions': versions,
'time_to_live': time_to_live,
}
result = self._callFUT(option)
max_age = datetime.timedelta(days=1)
# NOTE: This relies on the order of the rules in the method we are
# calling matching this order here.
gc_rule1 = MaxAgeGCRule(max_age)
gc_rule2 = MaxVersionsGCRule(versions)
gc_rule = GCRuleIntersection(rules=[gc_rule1, gc_rule2])
self.assertEqual(result, gc_rule)
def test_non_dictionary(self):
option = object()
self.assertFalse(isinstance(option, dict))
result = self._callFUT(option)
self.assertEqual(result, option)
class _Client(object):
def __init__(self, *args, **kwargs):
self.instances = kwargs.pop('instances', [])
for instance in self.instances:
instance.client = self
self.failed_locations = kwargs.pop('failed_locations', [])
self.args = args
self.kwargs = kwargs
self.start_calls = 0
self.stop_calls = 0
def start(self):
self.start_calls += 1
def stop(self):
self.stop_calls += 1
def list_instances(self):
return self.instances, self.failed_locations
class _Instance(object):
def __init__(self, copies=(), list_tables_result=()):
self.copies = list(copies)
# Included to support Connection.__del__
self._client = _Client()
self.list_tables_result = list_tables_result
def copy(self):
if self.copies:
result = self.copies[0]
self.copies[:] = self.copies[1:]
return result
else:
return self
def list_tables(self):
return self.list_tables_result
class _MockLowLevelColumnFamily(object):
def __init__(self, column_family_id, gc_rule=None):
self.column_family_id = column_family_id
self.gc_rule = gc_rule
self.create_calls = 0
def create(self):
self.create_calls += 1
class _MockLowLevelTable(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.create_error = kwargs.get('create_error')
self.delete_calls = 0
self.create_calls = 0
self.col_fam_created = []
def delete(self):
self.delete_calls += 1
def create(self):
self.create_calls += 1
if self.create_error:
raise self.create_error
def column_family(self, column_family_id, gc_rule=None):
result = _MockLowLevelColumnFamily(column_family_id, gc_rule=gc_rule)
self.col_fam_created.append(result)
return result

View file

@@ -0,0 +1,264 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestConnectionPool(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.happybase.pool import ConnectionPool
return ConnectionPool
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
import six
import threading
from gcloud.bigtable.happybase.connection import Connection
size = 11
instance_copy = _Instance()
all_copies = [instance_copy] * size
instance = _Instance(all_copies) # Avoid implicit environ check.
pool = self._makeOne(size, instance=instance)
self.assertTrue(isinstance(pool._lock, type(threading.Lock())))
self.assertTrue(isinstance(pool._thread_connections, threading.local))
self.assertEqual(pool._thread_connections.__dict__, {})
queue = pool._queue
self.assertTrue(isinstance(queue, six.moves.queue.LifoQueue))
self.assertTrue(queue.full())
self.assertEqual(queue.maxsize, size)
for connection in queue.queue:
self.assertTrue(isinstance(connection, Connection))
self.assertTrue(connection._instance is instance_copy)
def test_constructor_passes_kwargs(self):
table_prefix = 'foo'
table_prefix_separator = '<>'
instance = _Instance() # Avoid implicit environ check.
size = 1
pool = self._makeOne(size, table_prefix=table_prefix,
table_prefix_separator=table_prefix_separator,
instance=instance)
for connection in pool._queue.queue:
self.assertEqual(connection.table_prefix, table_prefix)
self.assertEqual(connection.table_prefix_separator,
table_prefix_separator)
def test_constructor_ignores_autoconnect(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase.connection import Connection
from gcloud.bigtable.happybase import pool as MUT
class ConnectionWithOpen(Connection):
_open_called = False
def open(self):
self._open_called = True
# First make sure the custom Connection class does as expected.
instance_copy1 = _Instance()
instance_copy2 = _Instance()
instance_copy3 = _Instance()
instance = _Instance([instance_copy1, instance_copy2, instance_copy3])
connection = ConnectionWithOpen(autoconnect=False, instance=instance)
self.assertFalse(connection._open_called)
self.assertTrue(connection._instance is instance_copy1)
connection = ConnectionWithOpen(autoconnect=True, instance=instance)
self.assertTrue(connection._open_called)
self.assertTrue(connection._instance is instance_copy2)
# Then make sure autoconnect=True is ignored in a pool.
size = 1
with _Monkey(MUT, Connection=ConnectionWithOpen):
pool = self._makeOne(size, autoconnect=True, instance=instance)
for connection in pool._queue.queue:
self.assertTrue(isinstance(connection, ConnectionWithOpen))
self.assertTrue(connection._instance is instance_copy3)
self.assertFalse(connection._open_called)
def test_constructor_infers_instance(self):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase.connection import Connection
from gcloud.bigtable.happybase import pool as MUT
size = 1
instance_copy = _Instance()
all_copies = [instance_copy] * size
instance = _Instance(all_copies)
get_instance_calls = []
def mock_get_instance(timeout=None):
get_instance_calls.append(timeout)
return instance
with _Monkey(MUT, _get_instance=mock_get_instance):
pool = self._makeOne(size)
for connection in pool._queue.queue:
self.assertTrue(isinstance(connection, Connection))
# We know that the Connection() constructor will
# call instance.copy().
self.assertTrue(connection._instance is instance_copy)
self.assertEqual(get_instance_calls, [None])
def test_constructor_non_integer_size(self):
size = None
with self.assertRaises(TypeError):
self._makeOne(size)
def test_constructor_non_positive_size(self):
size = -10
with self.assertRaises(ValueError):
self._makeOne(size)
size = 0
with self.assertRaises(ValueError):
self._makeOne(size)
def _makeOneWithMockQueue(self, queue_return):
from gcloud._testing import _Monkey
from gcloud.bigtable.happybase import pool as MUT
# We are going to use a fake queue, so we don't want any connections
# or instances to be created in the constructor.
size = -1
instance = object()
with _Monkey(MUT, _MIN_POOL_SIZE=size):
pool = self._makeOne(size, instance=instance)
pool._queue = _Queue(queue_return)
return pool
def test__acquire_connection(self):
queue_return = object()
pool = self._makeOneWithMockQueue(queue_return)
timeout = 432
connection = pool._acquire_connection(timeout=timeout)
self.assertTrue(connection is queue_return)
self.assertEqual(pool._queue._get_calls, [(True, timeout)])
self.assertEqual(pool._queue._put_calls, [])
def test__acquire_connection_failure(self):
from gcloud.bigtable.happybase.pool import NoConnectionsAvailable
pool = self._makeOneWithMockQueue(None)
timeout = 1027
with self.assertRaises(NoConnectionsAvailable):
pool._acquire_connection(timeout=timeout)
self.assertEqual(pool._queue._get_calls, [(True, timeout)])
self.assertEqual(pool._queue._put_calls, [])
def test_connection_is_context_manager(self):
import contextlib
import six
queue_return = _Connection()
pool = self._makeOneWithMockQueue(queue_return)
cnxn_context = pool.connection()
if six.PY3: # pragma: NO COVER Python 3
self.assertTrue(isinstance(cnxn_context,
contextlib._GeneratorContextManager))
else:
self.assertTrue(isinstance(cnxn_context,
contextlib.GeneratorContextManager))
def test_connection_no_current_cnxn(self):
queue_return = _Connection()
pool = self._makeOneWithMockQueue(queue_return)
timeout = 55
self.assertFalse(hasattr(pool._thread_connections, 'current'))
with pool.connection(timeout=timeout) as connection:
self.assertEqual(pool._thread_connections.current, queue_return)
self.assertTrue(connection is queue_return)
self.assertFalse(hasattr(pool._thread_connections, 'current'))
self.assertEqual(pool._queue._get_calls, [(True, timeout)])
self.assertEqual(pool._queue._put_calls,
[(queue_return, None, None)])
def test_connection_with_current_cnxn(self):
current_cnxn = _Connection()
queue_return = _Connection()
pool = self._makeOneWithMockQueue(queue_return)
pool._thread_connections.current = current_cnxn
timeout = 8001
with pool.connection(timeout=timeout) as connection:
self.assertTrue(connection is current_cnxn)
self.assertEqual(pool._queue._get_calls, [])
self.assertEqual(pool._queue._put_calls, [])
self.assertEqual(pool._thread_connections.current, current_cnxn)
class _Client(object):
def __init__(self):
self.stop_calls = 0
def stop(self):
self.stop_calls += 1
class _Connection(object):
def open(self):
pass
class _Instance(object):
def __init__(self, copies=()):
self.copies = list(copies)
# Included to support Connection.__del__
self._client = _Client()
def copy(self):
if self.copies:
result = self.copies[0]
self.copies[:] = self.copies[1:]
return result
else:
return self
class _Queue(object):
def __init__(self, result=None):
self.result = result
self._get_calls = []
self._put_calls = []
def get(self, block=None, timeout=None):
self._get_calls.append((block, timeout))
if self.result is None:
import six
raise six.moves.queue.Empty
else:
return self.result
def put(self, item, block=None, timeout=None):
self._put_calls.append((item, block, timeout))
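# Hedged usage sketch of the pool under test (the instance object and the
# table name are illustrative placeholders):
#
#     pool = ConnectionPool(10, instance=instance)
#     with pool.connection(timeout=5) as connection:
#         table = connection.table('table-name')
#
# connection() acts as a context manager: it checks a Connection out of
# the LIFO queue and returns it to the queue when the block exits.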

File diff suppressed because it is too large

View file

@@ -0,0 +1,488 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Instance."""
import re
from google.longrunning import operations_pb2
from gcloud._helpers import _pb_timestamp_to_datetime
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_messages_v2_pb2)
from gcloud.bigtable.cluster import Cluster
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
from gcloud.bigtable.table import Table
_EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster'
_INSTANCE_NAME_RE = re.compile(r'^projects/(?P<project>[^/]+)/'
r'instances/(?P<instance_id>[a-z][-a-z0-9]*)$')
_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/'
r'instances/([a-z][-a-z0-9]*)/'
r'locations/(?P<location_id>[a-z][-a-z0-9]*)/'
r'operations/(?P<operation_id>\d+)$')
_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.'
_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.v2.'
_INSTANCE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateInstanceMetadata'
_TYPE_URL_MAP = {
_INSTANCE_CREATE_METADATA: messages_v2_pb2.CreateInstanceMetadata,
}
def _prepare_create_request(instance):
"""Creates a protobuf request for a CreateInstance request.
:type instance: :class:`Instance`
:param instance: The instance to be created.
:rtype: :class:`.messages_v2_pb2.CreateInstanceRequest`
:returns: The CreateInstance request object containing the instance info.
"""
parent_name = ('projects/' + instance._client.project)
message = messages_v2_pb2.CreateInstanceRequest(
parent=parent_name,
instance_id=instance.instance_id,
instance=data_v2_pb2.Instance(
display_name=instance.display_name,
),
)
cluster = message.clusters[instance.instance_id]
cluster.name = instance.name + '/clusters/' + instance.instance_id
cluster.location = (
parent_name + '/locations/' + instance._cluster_location_id)
cluster.serve_nodes = instance._cluster_serve_nodes
return message
def _parse_pb_any_to_native(any_val, expected_type=None):
"""Convert a serialized "google.protobuf.Any" value to actual type.
:type any_val: :class:`google.protobuf.any_pb2.Any`
:param any_val: A serialized protobuf value container.
:type expected_type: str
:param expected_type: (Optional) The type URL we expect ``any_val``
to have.
:rtype: object
:returns: The de-serialized object.
:raises: :class:`ValueError <exceptions.ValueError>` if the
``expected_type`` does not match the ``type_url`` on the input.
"""
if expected_type is not None and expected_type != any_val.type_url:
raise ValueError('Expected type: %s, Received: %s' % (
expected_type, any_val.type_url))
container_class = _TYPE_URL_MAP[any_val.type_url]
return container_class.FromString(any_val.value)
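# Illustrative sketch (an assumption for clarity, not part of the module):
# a compatible Any value could be built and round-tripped like this:
#
#     from google.protobuf import any_pb2
#
#     metadata = messages_v2_pb2.CreateInstanceMetadata()
#     any_val = any_pb2.Any(type_url=_INSTANCE_CREATE_METADATA,
#                           value=metadata.SerializeToString())
#     _parse_pb_any_to_native(any_val)  # -> CreateInstanceMetadata instance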
def _process_operation(operation_pb):
"""Processes a create protobuf response.
:type operation_pb: :class:`google.longrunning.operations_pb2.Operation`
:param operation_pb: The long-running operation response from a
Create/Update/Undelete instance request.
:rtype: (int, str, datetime)
:returns: (operation_id, location_id, operation_begin).
:raises: :class:`ValueError <exceptions.ValueError>` if the operation name
doesn't match the :data:`_OPERATION_NAME_RE` regex.
"""
match = _OPERATION_NAME_RE.match(operation_pb.name)
if match is None:
raise ValueError('Operation name was not in the expected '
'format after instance creation.',
operation_pb.name)
location_id = match.group('location_id')
operation_id = int(match.group('operation_id'))
request_metadata = _parse_pb_any_to_native(operation_pb.metadata)
operation_begin = _pb_timestamp_to_datetime(
request_metadata.request_time)
return operation_id, location_id, operation_begin
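# Illustrative note (values are made up): _process_operation expects names
# matching _OPERATION_NAME_RE above, e.g.
#
#     name = ('operations/projects/my-project/instances/my-instance/'
#             'locations/us-central1-c/operations/1234')
#
# which would parse to operation_id=1234 and location_id='us-central1-c',
# with operation_begin decoded from the request metadata.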
class Operation(object):
"""Representation of a Google API Long-Running Operation.
In particular, these will be the result of operations on
instances using the Cloud Bigtable API.
:type op_type: str
:param op_type: The type of operation being performed. Expect
``create``, ``update`` or ``undelete``.
:type op_id: int
:param op_id: The ID of the operation.
:type begin: :class:`datetime.datetime`
:param begin: The time when the operation was started.
:type location_id: str
:param location_id: ID of the location in which the operation is running
:type instance: :class:`Instance`
:param instance: The instance that created the operation.
"""
def __init__(self, op_type, op_id, begin, location_id, instance=None):
self.op_type = op_type
self.op_id = op_id
self.begin = begin
self.location_id = location_id
self._instance = instance
self._complete = False
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.op_type == self.op_type and
other.op_id == self.op_id and
other.begin == self.begin and
other.location_id == self.location_id and
other._instance == self._instance and
other._complete == self._complete)
def __ne__(self, other):
return not self.__eq__(other)
def finished(self):
"""Check if the operation has finished.
:rtype: bool
:returns: A boolean indicating if the current operation has completed.
:raises: :class:`ValueError <exceptions.ValueError>` if the operation
has already completed.
"""
if self._complete:
raise ValueError('The operation has completed.')
operation_name = (
'operations/%s/locations/%s/operations/%d' %
(self._instance.name, self.location_id, self.op_id))
request_pb = operations_pb2.GetOperationRequest(name=operation_name)
# We expect a `google.longrunning.operations_pb2.Operation`.
operation_pb = self._instance._client._operations_stub.GetOperation(
request_pb, self._instance._client.timeout_seconds)
if operation_pb.done:
self._complete = True
return True
else:
return False
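# Hedged usage sketch (assumes an Instance created elsewhere; the sleep
# interval is arbitrary):
#
#     import time
#
#     operation = instance.create()
#     while not operation.finished():
#         time.sleep(5)
#
# Once finished() has returned True, calling it again raises ValueError
# because the operation is marked complete.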
class Instance(object):
"""Representation of a Google Cloud Bigtable Instance.
We can use an :class:`Instance` to:
* :meth:`reload` itself
* :meth:`create` itself
* :meth:`update` itself
* :meth:`delete` itself
* :meth:`undelete` itself
.. note::
For now, we leave out the ``default_storage_type`` (an enum)
which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`.
:type instance_id: str
:param instance_id: The ID of the instance.
:type client: :class:`Client <gcloud.bigtable.client.Client>`
:param client: The client that owns the instance. Provides
authorization and a project ID.
:type location_id: str
:param location_id: ID of the location in which the instance will be
created. Required for instances which do not yet
exist.
:type display_name: str
:param display_name: (Optional) The display name for the instance in the
Cloud Console UI. (Must be between 4 and 30
characters.) If this value is not set in the
constructor, will fall back to the instance ID.
:type serve_nodes: int
:param serve_nodes: (Optional) The number of nodes in the instance's
cluster; used to set up the instance's cluster.
"""
def __init__(self, instance_id, client,
location_id=_EXISTING_INSTANCE_LOCATION_ID,
display_name=None,
serve_nodes=DEFAULT_SERVE_NODES):
self.instance_id = instance_id
self.display_name = display_name or instance_id
self._cluster_location_id = location_id
self._cluster_serve_nodes = serve_nodes
self._client = client
def _update_from_pb(self, instance_pb):
"""Refresh self from the server-provided protobuf.
Helper for :meth:`from_pb` and :meth:`reload`.
"""
if not instance_pb.display_name: # Simple field (string)
raise ValueError('Instance protobuf does not contain display_name')
self.display_name = instance_pb.display_name
@classmethod
def from_pb(cls, instance_pb, client):
"""Creates a instance instance from a protobuf.
:type instance_pb: :class:`instance_pb2.Instance`
:param instance_pb: A instance protobuf object.
:type client: :class:`Client <gcloud.bigtable.client.Client>`
:param client: The client that owns the instance.
:rtype: :class:`Instance`
:returns: The instance parsed from the protobuf response.
:raises: :class:`ValueError <exceptions.ValueError>` if the instance
name does not match
``projects/{project}/instances/{instance_id}``
or if the parsed project ID does not match the project ID
on the client.
"""
match = _INSTANCE_NAME_RE.match(instance_pb.name)
if match is None:
raise ValueError('Instance protobuf name was not in the '
'expected format.', instance_pb.name)
if match.group('project') != client.project:
raise ValueError('Project ID on instance does not match the '
'project ID on the client')
instance_id = match.group('instance_id')
result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID)
result._update_from_pb(instance_pb)
return result
def copy(self):
"""Make a copy of this instance.
Copies the local data stored as simple types and copies the client
attached to this instance.
:rtype: :class:`.Instance`
:returns: A copy of the current instance.
"""
new_client = self._client.copy()
return self.__class__(self.instance_id, new_client,
self._cluster_location_id,
display_name=self.display_name)
@property
def name(self):
"""Instance name used in requests.
.. note::
This property will not change if ``instance_id`` does not,
but the return value is not cached.
The instance name is of the form
``"projects/{project}/instances/{instance_id}"``
:rtype: str
:returns: The instance name.
"""
return self._client.project_name + '/instances/' + self.instance_id
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
# NOTE: This does not compare the configuration values, such as
# the display_name. Instead, it only compares
# identifying values: instance ID and client. This is
# intentional, since the same instance can be in different states
# if not synchronized. Instances with similar instance
# settings but different clients can't be used in the same way.
return (other.instance_id == self.instance_id and
other._client == self._client)
def __ne__(self, other):
return not self.__eq__(other)
def reload(self):
"""Reload the metadata for this instance."""
request_pb = messages_v2_pb2.GetInstanceRequest(name=self.name)
# We expect `data_v2_pb2.Instance`.
instance_pb = self._client._instance_stub.GetInstance(
request_pb, self._client.timeout_seconds)
# NOTE: _update_from_pb does not check that the project and
# instance ID on the response match the request.
self._update_from_pb(instance_pb)
def create(self):
"""Create this instance.
.. note::
Uses the ``project`` and ``instance_id`` on the current
:class:`Instance` in addition to the ``display_name``.
To change them before creating, reset the values via
.. code:: python
instance.display_name = 'New display name'
instance.instance_id = 'i-changed-my-mind'
before calling :meth:`create`.
:rtype: :class:`Operation`
:returns: The long-running operation corresponding to the
create operation.
"""
request_pb = _prepare_create_request(self)
# We expect a `google.longrunning.operations_pb2.Operation`.
operation_pb = self._client._instance_stub.CreateInstance(
request_pb, self._client.timeout_seconds)
op_id, loc_id, op_begin = _process_operation(operation_pb)
return Operation('create', op_id, op_begin, loc_id, instance=self)
def update(self):
"""Update this instance.
.. note::
Updates the ``display_name``. To change that value before
updating, reset its values via
.. code:: python
instance.display_name = 'New display name'
before calling :meth:`update`.
"""
request_pb = data_v2_pb2.Instance(
name=self.name,
display_name=self.display_name,
)
# Ignore the expected `data_v2_pb2.Instance`.
self._client._instance_stub.UpdateInstance(
request_pb, self._client.timeout_seconds)
def delete(self):
"""Delete this instance.
Marks an instance and all of its tables for permanent deletion
in 7 days.
Immediately upon completion of the request:
* Billing will cease for all of the instance's reserved resources.
* The instance's ``delete_time`` field will be set 7 days in
the future.
Soon afterward:
* All tables within the instance will become unavailable.
Prior to the instance's ``delete_time``:
* The instance can be recovered with a call to ``UndeleteInstance``.
* All other attempts to modify or delete the instance will be rejected.
At the instance's ``delete_time``:
* The instance and **all of its tables** will immediately and
irrevocably disappear from the API, and their data will be
permanently deleted.
"""
request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name)
# We expect a `google.protobuf.empty_pb2.Empty`
self._client._instance_stub.DeleteInstance(
request_pb, self._client.timeout_seconds)
def cluster(self, cluster_id, serve_nodes=3):
"""Factory to create a cluster associated with this client.
:type cluster_id: str
:param cluster_id: The ID of the cluster.
:type serve_nodes: int
:param serve_nodes: (Optional) The number of nodes in the cluster.
Defaults to 3.
:rtype: :class:`.Cluster`
:returns: The cluster owned by this client.
"""
return Cluster(cluster_id, self, serve_nodes=serve_nodes)
def list_clusters(self):
"""Lists clusters in this instance.
:rtype: tuple
:returns: A pair of results: the first is a list of :class:`.Cluster`
objects returned and the second is a list of strings (the failed
locations in the request).
"""
request_pb = messages_v2_pb2.ListClustersRequest(parent=self.name)
# We expect a `.messages_v2_pb2.ListClustersResponse`
list_clusters_response = self._client._instance_stub.ListClusters(
request_pb, self._client.timeout_seconds)
failed_locations = [
location for location in list_clusters_response.failed_locations]
clusters = [Cluster.from_pb(cluster_pb, self)
for cluster_pb in list_clusters_response.clusters]
return clusters, failed_locations
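# Hedged usage note: list_clusters returns a two-element tuple, e.g.
#
#     clusters, failed_locations = instance.list_clusters()
#
# where clusters is a list of Cluster objects and failed_locations is a
# list of location strings that could not be reached.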
def table(self, table_id):
"""Factory to create a table associated with this instance.
:type table_id: str
:param table_id: The ID of the table.
:rtype: :class:`Table <gcloud.bigtable.table.Table>`
:returns: The table owned by this instance.
"""
return Table(table_id, self)
def list_tables(self):
"""List the tables in this instance.
:rtype: list of :class:`Table <gcloud.bigtable.table.Table>`
:returns: The list of tables owned by the instance.
:raises: :class:`ValueError <exceptions.ValueError>` if one of the
returned tables has a name that is not of the expected format.
"""
request_pb = table_messages_v2_pb2.ListTablesRequest(parent=self.name)
# We expect a `table_messages_v2_pb2.ListTablesResponse`
table_list_pb = self._client._table_stub.ListTables(
request_pb, self._client.timeout_seconds)
result = []
for table_pb in table_list_pb.tables:
table_prefix = self.name + '/tables/'
if not table_pb.name.startswith(table_prefix):
raise ValueError('Table name %s not of expected format' % (
table_pb.name,))
table_id = table_pb.name[len(table_prefix):]
result.append(self.table(table_id))
return result
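# Hedged end-to-end sketch (the project ID, instance ID and location are
# illustrative placeholders; this is not part of the module):
#
#     from gcloud.bigtable.client import Client
#
#     client = Client(project='my-project', admin=True)
#     client.start()
#     instance = Instance('my-instance', client, 'us-central1-c')
#     operation = instance.create()      # long-running Operation
#     tables = instance.list_tables()    # -> [Table, ...]
#     client.stop()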

File diff suppressed because it is too large

View file

@@ -0,0 +1,889 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Row."""
import struct
import six
from gcloud._helpers import _datetime_from_microseconds
from gcloud._helpers import _microseconds_from_datetime
from gcloud._helpers import _to_bytes
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
_PACK_I64 = struct.Struct('>q').pack
MAX_MUTATIONS = 100000
"""The maximum number of mutations that a row can accumulate."""
class Row(object):
"""Base representation of a Google Cloud Bigtable Row.
This class has three subclasses corresponding to the three
RPC methods for sending row mutations:
* :class:`DirectRow` for ``MutateRow``
* :class:`ConditionalRow` for ``CheckAndMutateRow``
* :class:`AppendRow` for ``ReadModifyWriteRow``
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
def __init__(self, row_key, table):
self._row_key = _to_bytes(row_key)
self._table = table
class _SetDeleteRow(Row):
"""Row helper for setting or deleting cell values.
Implements helper methods to add mutations to set or delete cell contents:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
ALL_COLUMNS = object()
"""Sentinel value used to indicate all columns in a column family."""
def _get_mutations(self, state):
"""Gets the list of mutations for a given state.
This method is intended to be implemented by subclasses.
``state`` may not need to be used by all subclasses.
:type state: bool
:param state: The state that the mutation should be
applied in.
:raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
always.
"""
raise NotImplementedError
def _set_cell(self, column_family_id, column, value, timestamp=None,
state=None):
"""Helper for :meth:`set_cell`
Adds a mutation to set the value in a specific cell.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
will be interpreted as a 64-bit big-endian signed
integer (8 bytes).
:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`.
"""
column = _to_bytes(column)
if isinstance(value, six.integer_types):
value = _PACK_I64(value)
value = _to_bytes(value)
if timestamp is None:
# Use -1 for current Bigtable server time.
timestamp_micros = -1
else:
timestamp_micros = _microseconds_from_datetime(timestamp)
# Truncate to millisecond granularity.
timestamp_micros -= (timestamp_micros % 1000)
mutation_val = data_v2_pb2.Mutation.SetCell(
family_name=column_family_id,
column_qualifier=column,
timestamp_micros=timestamp_micros,
value=value,
)
mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val)
self._get_mutations(state).append(mutation_pb)
def _delete(self, state=None):
"""Helper for :meth:`delete`
Adds a delete mutation (for the entire row) to the accumulated
mutations.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`.
"""
mutation_val = data_v2_pb2.Mutation.DeleteFromRow()
mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val)
self._get_mutations(state).append(mutation_pb)
def _delete_cells(self, column_family_id, columns, time_range=None,
state=None):
"""Helper for :meth:`delete_cell` and :meth:`delete_cells`.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type columns: :class:`list` of :class:`str` /
:func:`unicode <unicode>`, or :class:`object`
:param columns: The columns within the column family that will have
cells deleted. If :attr:`ALL_COLUMNS` is used then
the entire column family will be deleted from the row.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
:type state: bool
:param state: (Optional) The state that is passed along to
:meth:`_get_mutations`.
"""
mutations_list = self._get_mutations(state)
if columns is self.ALL_COLUMNS:
mutation_val = data_v2_pb2.Mutation.DeleteFromFamily(
family_name=column_family_id,
)
mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val)
mutations_list.append(mutation_pb)
else:
delete_kwargs = {}
if time_range is not None:
delete_kwargs['time_range'] = time_range.to_pb()
to_append = []
for column in columns:
column = _to_bytes(column)
# time_range will never change if present, but the rest of
# delete_kwargs will
delete_kwargs.update(
family_name=column_family_id,
column_qualifier=column,
)
mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(
**delete_kwargs)
mutation_pb = data_v2_pb2.Mutation(
delete_from_column=mutation_val)
to_append.append(mutation_pb)
# We don't add the mutations until all columns have been
# processed without error.
mutations_list.extend(to_append)
class DirectRow(_SetDeleteRow):
"""Google Cloud Bigtable Row for sending "direct" mutations.
These mutations directly set or delete cell contents:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
These methods can be used directly::
>>> row = table.row(b'row-key1')
>>> row.set_cell(u'fam', b'col1', b'cell-val')
>>> row.delete_cell(u'fam', b'col2')
.. note::
A :class:`DirectRow` accumulates mutations locally via the
:meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and
:meth:`delete_cells` methods. To actually send these mutations to the
Google Cloud Bigtable API, you must call :meth:`commit`.
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
def __init__(self, row_key, table):
super(DirectRow, self).__init__(row_key, table)
self._pb_mutations = []
def _get_mutations(self, state): # pylint: disable=unused-argument
"""Gets the list of mutations for a given state.
``state`` is unused by :class:`DirectRow` but is used by
subclasses.
:type state: bool
:param state: The state that the mutation should be
applied in.
:rtype: list
:returns: The list to add new mutations to (for the current state).
"""
return self._pb_mutations
def set_cell(self, column_family_id, column, value, timestamp=None):
"""Sets a value in this row.
The cell is determined by the ``row_key`` of this :class:`DirectRow`
and the ``column``. The ``column`` must be in an existing
:class:`.ColumnFamily` (as determined by ``column_family_id``).
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
will be interpreted as a 64-bit big-endian signed
integer (8 bytes).
:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.
"""
self._set_cell(column_family_id, column, value, timestamp=timestamp,
state=None)
def delete(self):
"""Deletes this row from the table.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
"""
self._delete(state=None)
def delete_cell(self, column_family_id, column, time_range=None):
"""Deletes cell in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family that will have a
cell deleted.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
"""
self._delete_cells(column_family_id, [column], time_range=time_range,
state=None)
def delete_cells(self, column_family_id, columns, time_range=None):
"""Deletes cells in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type columns: :class:`list` of :class:`str` /
:func:`unicode <unicode>`, or :class:`object`
:param columns: The columns within the column family that will have
cells deleted. If :attr:`ALL_COLUMNS` is used then
the entire column family will be deleted from the row.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
"""
self._delete_cells(column_family_id, columns, time_range=time_range,
state=None)
def commit(self):
"""Makes a ``MutateRow`` API request.
If no mutations have been created in the row, no request is made.
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations to an empty list.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
mutations_list = self._get_mutations(None)
num_mutations = len(mutations_list)
if num_mutations == 0:
return
if num_mutations > MAX_MUTATIONS:
raise ValueError('%d total mutations exceed the maximum allowable '
'%d.' % (num_mutations, MAX_MUTATIONS))
request_pb = messages_v2_pb2.MutateRowRequest(
table_name=self._table.name,
row_key=self._row_key,
mutations=mutations_list,
)
# We expect a `google.protobuf.empty_pb2.Empty`
client = self._table._instance._client
client._data_stub.MutateRow(request_pb, client.timeout_seconds)
self.clear()
def clear(self):
"""Removes all currently accumulated mutations on the current row."""
del self._pb_mutations[:]
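# Hedged usage sketch for DirectRow (table and keys are illustrative):
#
#     row = table.row(b'row-key1')
#     row.set_cell(u'fam', b'col1', b'cell-val')
#     row.delete_cell(u'fam', b'col2')
#     row.commit()   # sends one MutateRow request, then clears the row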
class ConditionalRow(_SetDeleteRow):
"""Google Cloud Bigtable Row for sending mutations conditionally.
Each mutation has an associated state: :data:`True` or :data:`False`.
When :meth:`commit`-ed, the mutations for the :data:`True`
state will be applied if the filter matches any cells in
the row, otherwise the :data:`False` state will be applied.
A :class:`ConditionalRow` accumulates mutations in the same way a
:class:`DirectRow` does:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
with the only change the extra ``state`` parameter::
>>> row_cond = table.row(b'row-key2', filter_=row_filter)
>>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True)
>>> row_cond.delete_cell(u'fam', b'col', state=False)
.. note::
As with :class:`DirectRow`, to actually send these mutations to the
Google Cloud Bigtable API, you must call :meth:`commit`.
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
:type filter_: :class:`.RowFilter`
:param filter_: Filter to be used for conditional mutations.
"""
def __init__(self, row_key, table, filter_):
super(ConditionalRow, self).__init__(row_key, table)
self._filter = filter_
self._true_pb_mutations = []
self._false_pb_mutations = []
def _get_mutations(self, state):
"""Gets the list of mutations for a given state.
Overridden so that the state can be used in:
* :meth:`set_cell`
* :meth:`delete`
* :meth:`delete_cell`
* :meth:`delete_cells`
:type state: bool
:param state: The state that the mutation should be
applied in.
:rtype: list
:returns: The list to add new mutations to (for the current state).
"""
if state:
return self._true_pb_mutations
else:
return self._false_pb_mutations
def commit(self):
"""Makes a ``CheckAndMutateRow`` API request.
If no mutations have been created in the row, no request is made.
The mutations will be applied conditionally, based on whether the
filter matches any cells in the :class:`ConditionalRow` or not. (Each
method which adds a mutation has a ``state`` parameter for this
purpose.)
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations.
:rtype: bool
:returns: Flag indicating if the filter was matched (which also
indicates which set of mutations were applied by the server).
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
true_mutations = self._get_mutations(state=True)
false_mutations = self._get_mutations(state=False)
num_true_mutations = len(true_mutations)
num_false_mutations = len(false_mutations)
if num_true_mutations == 0 and num_false_mutations == 0:
return
if (num_true_mutations > MAX_MUTATIONS or
num_false_mutations > MAX_MUTATIONS):
raise ValueError(
'Exceeded the maximum allowable mutations (%d). Had %d true '
'mutations and %d false mutations.' % (
MAX_MUTATIONS, num_true_mutations, num_false_mutations))
request_pb = messages_v2_pb2.CheckAndMutateRowRequest(
table_name=self._table.name,
row_key=self._row_key,
predicate_filter=self._filter.to_pb(),
true_mutations=true_mutations,
false_mutations=false_mutations,
)
# We expect a `.messages_v2_pb2.CheckAndMutateRowResponse`
client = self._table._instance._client
resp = client._data_stub.CheckAndMutateRow(
request_pb, client.timeout_seconds)
self.clear()
return resp.predicate_matched
# pylint: disable=arguments-differ
def set_cell(self, column_family_id, column, value, timestamp=None,
state=True):
"""Sets a value in this row.
The cell is determined by the ``row_key`` of this
:class:`ConditionalRow` and the ``column``. The ``column`` must be in
an existing :class:`.ColumnFamily` (as determined by
``column_family_id``).
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes or :class:`int`
:param value: The value to set in the cell. If an integer is used,
will be interpreted as a 64-bit big-endian signed
integer (8 bytes).
:type timestamp: :class:`datetime.datetime`
:param timestamp: (Optional) The timestamp of the operation.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._set_cell(column_family_id, column, value, timestamp=timestamp,
state=state)
def delete(self, state=True):
"""Deletes this row from the table.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._delete(state=state)
def delete_cell(self, column_family_id, column, time_range=None,
state=True):
"""Deletes cell in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family that will have a
cell deleted.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._delete_cells(column_family_id, [column], time_range=time_range,
state=state)
def delete_cells(self, column_family_id, columns, time_range=None,
state=True):
"""Deletes cells in this row.
.. note::
This method adds a mutation to the accumulated mutations on this
row, but does not make an API request. To actually
send an API request (with the mutations) to the Google Cloud
Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column
or columns with cells being deleted. Must be
of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type columns: :class:`list` of :class:`str` /
:func:`unicode <unicode>`, or :class:`object`
:param columns: The columns within the column family that will have
cells deleted. If :attr:`ALL_COLUMNS` is used then the
entire column family will be deleted from the row.
:type time_range: :class:`TimestampRange`
:param time_range: (Optional) The range of time within which cells
should be deleted.
:type state: bool
:param state: (Optional) The state that the mutation should be
applied in. Defaults to :data:`True`.
"""
self._delete_cells(column_family_id, columns, time_range=time_range,
state=state)
# pylint: enable=arguments-differ
def clear(self):
"""Removes all currently accumulated mutations on the current row."""
del self._true_pb_mutations[:]
del self._false_pb_mutations[:]
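# Illustrative usage sketch (not part of the original module): ``table`` and
# ``predicate_filter`` are assumed inputs (an already-configured Table and any
# RowFilter); it simply exercises the check-and-mutate flow documented above.
def _example_conditional_write(table, predicate_filter):
    """Hypothetical helper demonstrating :class:`ConditionalRow` usage."""
    row = table.row(b'row-key', filter_=predicate_filter)
    # Mutation applied only if the predicate filter matches the row ...
    row.set_cell(u'fam', b'col', b'value-if-matched', state=True)
    # ... and the mutation applied only if it does not match.
    row.set_cell(u'fam', b'col', b'value-if-not-matched', state=False)
    return row.commit()  # Flag indicating whether the filter matched.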
class AppendRow(Row):
"""Google Cloud Bigtable Row for sending append mutations.
These mutations are intended to augment the value of an existing cell
and uses the methods:
* :meth:`append_cell_value`
* :meth:`increment_cell_value`
The first works by appending bytes and the second by incrementing an
integer (stored in the cell as 8 bytes). In either case, if the
cell is empty, assumes the default empty value (empty string for
bytes or 0 for an integer).
:type row_key: bytes
:param row_key: The key for the current row.
:type table: :class:`Table <gcloud.bigtable.table.Table>`
:param table: The table that owns the row.
"""
def __init__(self, row_key, table):
super(AppendRow, self).__init__(row_key, table)
self._rule_pb_list = []
def clear(self):
"""Removes all currently accumulated modifications on current row."""
del self._rule_pb_list[:]
def append_cell_value(self, column_family_id, column, value):
"""Appends a value to an existing cell.
.. note::
This method adds a read-modify rule protobuf to the accumulated
read-modify rules on this row, but does not make an API
request. To actually send an API request (with the rules) to the
Google Cloud Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type value: bytes
:param value: The value to append to the existing value in the cell. If
the targeted cell is unset, it will be treated as
containing the empty string.
"""
column = _to_bytes(column)
value = _to_bytes(value)
rule_pb = data_v2_pb2.ReadModifyWriteRule(
family_name=column_family_id,
column_qualifier=column,
append_value=value)
self._rule_pb_list.append(rule_pb)
def increment_cell_value(self, column_family_id, column, int_value):
"""Increments a value in an existing cell.
Assumes the value in the cell is stored as a 64 bit integer
serialized to bytes.
.. note::
This method adds a read-modify rule protobuf to the accumulated
read-modify rules on this row, but does not make an API
request. To actually send an API request (with the rules) to the
Google Cloud Bigtable API, call :meth:`commit`.
:type column_family_id: str
:param column_family_id: The column family that contains the column.
Must be of the form
``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type column: bytes
:param column: The column within the column family where the cell
is located.
:type int_value: int
:param int_value: The value to increment the existing value in the cell
by. If the targeted cell is unset, it will be treated
as containing a zero. Otherwise, the targeted cell
must contain an 8-byte value (interpreted as a 64-bit
big-endian signed integer), or the entire request
will fail.
"""
column = _to_bytes(column)
rule_pb = data_v2_pb2.ReadModifyWriteRule(
family_name=column_family_id,
column_qualifier=column,
increment_amount=int_value)
self._rule_pb_list.append(rule_pb)
def commit(self):
"""Makes a ``ReadModifyWriteRow`` API request.
This commits modifications made by :meth:`append_cell_value` and
:meth:`increment_cell_value`. If no modifications were made, makes
no API request and just returns ``{}``.
Modifies a row atomically, reading the latest existing
timestamp / value from the specified columns and writing a new value by
appending / incrementing. The new cell created uses either the current
server time or the highest timestamp of a cell in that column (if it
exceeds the server time).
After committing the accumulated mutations, resets the local mutations.
.. code:: python
>>> append_row.commit()
{
u'col-fam-id': {
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
},
u'col-fam-id2': {
b'col-name3-but-other-fam': [
(b'foo', datetime.datetime(...)),
],
},
}
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell.
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
num_mutations = len(self._rule_pb_list)
if num_mutations == 0:
return {}
if num_mutations > MAX_MUTATIONS:
raise ValueError('%d total append mutations exceed the maximum '
'allowable %d.' % (num_mutations, MAX_MUTATIONS))
request_pb = messages_v2_pb2.ReadModifyWriteRowRequest(
table_name=self._table.name,
row_key=self._row_key,
rules=self._rule_pb_list,
)
# We expect a `.data_v2_pb2.Row`
client = self._table._instance._client
row_response = client._data_stub.ReadModifyWriteRow(
request_pb, client.timeout_seconds)
# Reset modifications after commit-ing request.
self.clear()
# NOTE: We expect row_response.key == self._row_key but don't check.
return _parse_rmw_row_response(row_response)
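# Illustrative usage sketch (not part of the original module): ``table`` is an
# assumed, already-configured Table; it shows the append / increment flow whose
# result shape is documented in :meth:`AppendRow.commit` above.
def _example_append_row(table):
    """Hypothetical helper demonstrating :class:`AppendRow` usage."""
    row = table.row(b'row-key', append=True)
    row.append_cell_value(u'fam', b'col-bytes', b'-suffix')
    row.increment_cell_value(u'fam', b'col-counter', 1)
    # Returns the new contents of the modified cells, keyed by family/column.
    return row.commit()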
def _parse_rmw_row_response(row_response):
"""Parses the response to a ``ReadModifyWriteRow`` request.
:type row_response: :class:`.data_v2_pb2.Row`
:param row_response: The response row (with only modified cells) from a
``ReadModifyWriteRow`` request.
:rtype: dict
:returns: The new contents of all modified cells. Returned as a
dictionary of column families, each of which holds a
dictionary of columns. Each column contains a list of cells
modified. Each cell is represented with a two-tuple with the
value (in bytes) and the timestamp for the cell. For example:
.. code:: python
{
u'col-fam-id': {
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
},
u'col-fam-id2': {
b'col-name3-but-other-fam': [
(b'foo', datetime.datetime(...)),
],
},
}
"""
result = {}
for column_family in row_response.row.families:
column_family_id, curr_family = _parse_family_pb(column_family)
result[column_family_id] = curr_family
return result
def _parse_family_pb(family_pb):
"""Parses a Family protobuf into a dictionary.
:type family_pb: :class:`._generated_v2.data_pb2.Family`
:param family_pb: A protobuf
:rtype: tuple
:returns: A string and dictionary. The string is the name of the
column family and the dictionary has column names (within the
family) as keys and cell lists as values. Each cell is
represented with a two-tuple with the value (in bytes) and the
timestamp for the cell. For example:
.. code:: python
{
b'col-name1': [
(b'cell-val', datetime.datetime(...)),
(b'cell-val-newer', datetime.datetime(...)),
],
b'col-name2': [
(b'altcol-cell-val', datetime.datetime(...)),
],
}
"""
result = {}
for column in family_pb.columns:
result[column.qualifier] = cells = []
for cell in column.cells:
val_pair = (
cell.value,
_datetime_from_microseconds(cell.timestamp_micros),
)
cells.append(val_pair)
return family_pb.name, result

View file

@@ -0,0 +1,442 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Container for Google Cloud Bigtable Cells and Streaming Row Contents."""
import copy
import six
from gcloud._helpers import _datetime_from_microseconds
from gcloud._helpers import _to_bytes
class Cell(object):
"""Representation of a Google Cloud Bigtable Cell.
:type value: bytes
:param value: The value stored in the cell.
:type timestamp: :class:`datetime.datetime`
:param timestamp: The timestamp when the cell was stored.
:type labels: list
:param labels: (Optional) List of strings. Labels applied to the cell.
"""
def __init__(self, value, timestamp, labels=()):
self.value = value
self.timestamp = timestamp
self.labels = list(labels)
@classmethod
def from_pb(cls, cell_pb):
"""Create a new cell from a Cell protobuf.
:type cell_pb: :class:`._generated_v2.data_pb2.Cell`
:param cell_pb: The protobuf to convert.
:rtype: :class:`Cell`
:returns: The cell corresponding to the protobuf.
"""
timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros)
if cell_pb.labels:
return cls(cell_pb.value, timestamp, labels=cell_pb.labels)
else:
return cls(cell_pb.value, timestamp)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.value == self.value and
other.timestamp == self.timestamp and
other.labels == self.labels)
def __ne__(self, other):
return not self.__eq__(other)
class PartialCellData(object):
"""Representation of partial cell in a Google Cloud Bigtable Table.
These are expected to be updated directly from a
:class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse`
:type row_key: bytes
:param row_key: The key for the row holding the (partial) cell.
:type family_name: str
:param family_name: The family name of the (partial) cell.
:type qualifier: bytes
:param qualifier: The column qualifier of the (partial) cell.
:type timestamp_micros: int
:param timestamp_micros: The timestamp (in microseconds) of the
(partial) cell.
:type labels: list of str
:param labels: labels assigned to the (partial) cell
:type value: bytes
:param value: The (accumulated) value of the (partial) cell.
"""
def __init__(self, row_key, family_name, qualifier, timestamp_micros,
labels=(), value=b''):
self.row_key = row_key
self.family_name = family_name
self.qualifier = qualifier
self.timestamp_micros = timestamp_micros
self.labels = labels
self.value = value
def append_value(self, value):
"""Append bytes from a new chunk to value.
:type value: bytes
:param value: bytes to append
"""
self.value += value
class PartialRowData(object):
"""Representation of partial row in a Google Cloud Bigtable Table.
These are expected to be updated directly from a
:class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse`
:type row_key: bytes
:param row_key: The key for the row holding the (partial) data.
"""
def __init__(self, row_key):
self._row_key = row_key
self._cells = {}
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other._row_key == self._row_key and
other._cells == self._cells)
def __ne__(self, other):
return not self.__eq__(other)
def to_dict(self):
"""Convert the cells to a dictionary.
This is intended to be used with HappyBase, so the column family and
column qualiers are combined (with ``:``).
:rtype: dict
:returns: Dictionary containing all the data in the cells of this row.
"""
result = {}
for column_family_id, columns in six.iteritems(self._cells):
for column_qual, cells in six.iteritems(columns):
key = (_to_bytes(column_family_id) + b':' +
_to_bytes(column_qual))
result[key] = cells
return result
@property
def cells(self):
"""Property returning all the cells accumulated on this partial row.
:rtype: dict
:returns: Dictionary of the :class:`Cell` objects accumulated. This
dictionary has two-levels of keys (first for column families
and second for column names/qualifiers within a family). For
a given column, a list of :class:`Cell` objects is stored.
"""
return copy.deepcopy(self._cells)
@property
def row_key(self):
"""Getter for the current (partial) row's key.
:rtype: bytes
:returns: The current (partial) row's key.
"""
return self._row_key
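# Illustrative sketch (not part of the original module): ``row_data`` is an
# assumed, already-populated :class:`PartialRowData`; ``to_dict`` flattens the
# two-level ``cells`` mapping into HappyBase-style ``b'family:qualifier'`` keys.
def _example_flatten_row(row_data):
    """Hypothetical helper contrasting ``cells`` with ``to_dict``."""
    nested = row_data.cells    # {family_id: {qualifier: [Cell, ...]}}
    flat = row_data.to_dict()  # {b'family_id:qualifier': [Cell, ...]}
    return nested, flat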
class InvalidReadRowsResponse(RuntimeError):
"""Exception raised to to invalid response data from back-end."""
class InvalidChunk(RuntimeError):
"""Exception raised to to invalid chunk data from back-end."""
class PartialRowsData(object):
"""Convenience wrapper for consuming a ``ReadRows`` streaming response.
:type response_iterator:
:class:`grpc.framework.alpha._reexport._CancellableIterator`
:param response_iterator: A streaming iterator returned from a
``ReadRows`` request.
"""
START = "Start" # No responses yet processed.
NEW_ROW = "New row" # No cells yet complete for row
ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row
CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row
def __init__(self, response_iterator):
self._response_iterator = response_iterator
# Fully-processed rows, keyed by `row_key`
self._rows = {}
# Counter for responses pulled from iterator
self._counter = 0
# Maybe cached from previous response
self._last_scanned_row_key = None
# In-progress row, unset until first response, after commit/reset
self._row = None
# Last complete row, unset until first commit
self._previous_row = None
# In-progress cell, unset until first response, after completion
self._cell = None
# Last complete cell, unset until first completion, after new row
self._previous_cell = None
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other._response_iterator == self._response_iterator
def __ne__(self, other):
return not self.__eq__(other)
@property
def state(self):
"""State machine state.
:rtype: str
:returns: name of state corresponding to current row / chunk
processing.
"""
if self._last_scanned_row_key is None:
return self.START
if self._row is None:
assert self._cell is None
assert self._previous_cell is None
return self.NEW_ROW
if self._cell is not None:
return self.CELL_IN_PROGRESS
if self._previous_cell is not None:
return self.ROW_IN_PROGRESS
return self.NEW_ROW # row added, no chunk yet processed
@property
def rows(self):
"""Property returning all rows accumulated from the stream.
:rtype: dict
:returns: row_key -> :class:`PartialRowData`.
"""
# NOTE: To avoid duplicating large objects, this is just the
# mutable private data.
return self._rows
def cancel(self):
"""Cancels the iterator, closing the stream."""
self._response_iterator.cancel()
def consume_next(self):
"""Consume the next ``ReadRowsResponse`` from the stream.
Parse the response and its chunks into a new/existing row in
:attr:`_rows`
"""
response = six.next(self._response_iterator)
self._counter += 1
if self._last_scanned_row_key is None: # first response
if response.last_scanned_row_key:
raise InvalidReadRowsResponse()
self._last_scanned_row_key = response.last_scanned_row_key
row = self._row
cell = self._cell
for chunk in response.chunks:
self._validate_chunk(chunk)
if chunk.reset_row:
row = self._row = None
cell = self._cell = self._previous_cell = None
continue
if row is None:
row = self._row = PartialRowData(chunk.row_key)
if cell is None:
cell = self._cell = PartialCellData(
chunk.row_key,
chunk.family_name.value,
chunk.qualifier.value,
chunk.timestamp_micros,
chunk.labels,
chunk.value)
self._copy_from_previous(cell)
else:
cell.append_value(chunk.value)
if chunk.commit_row:
self._save_current_row()
row = cell = None
continue
if chunk.value_size == 0:
self._save_current_cell()
cell = None
def consume_all(self, max_loops=None):
"""Consume the streamed responses until there are no more.
This simply calls :meth:`consume_next` until there are no
more to consume.
:type max_loops: int
:param max_loops: (Optional) Maximum number of times to try to consume
an additional ``ReadRowsResponse``. You can use this
to avoid long wait times.
"""
curr_loop = 0
if max_loops is None:
max_loops = float('inf')
while curr_loop < max_loops:
curr_loop += 1
try:
self.consume_next()
except StopIteration:
break
@staticmethod
def _validate_chunk_status(chunk):
"""Helper for :meth:`_validate_chunk_row_in_progress`, etc."""
# No reset with other keys
if chunk.reset_row:
_raise_if(chunk.row_key)
_raise_if(chunk.HasField('family_name'))
_raise_if(chunk.HasField('qualifier'))
_raise_if(chunk.timestamp_micros)
_raise_if(chunk.labels)
_raise_if(chunk.value_size)
_raise_if(chunk.value)
# No commit with value size
_raise_if(chunk.commit_row and chunk.value_size > 0)
# No negative value_size (inferred as a general constraint).
_raise_if(chunk.value_size < 0)
def _validate_chunk_new_row(self, chunk):
"""Helper for :meth:`_validate_chunk`."""
assert self.state == self.NEW_ROW
_raise_if(chunk.reset_row)
_raise_if(not chunk.row_key)
_raise_if(not chunk.family_name)
_raise_if(not chunk.qualifier)
# This constraint is not enforced in the Go example.
_raise_if(chunk.value_size > 0 and chunk.commit_row is not False)
# This constraint is from the Go example, not the spec.
_raise_if(self._previous_row is not None and
chunk.row_key <= self._previous_row.row_key)
def _same_as_previous(self, chunk):
"""Helper for :meth:`_validate_chunk_row_in_progress`"""
previous = self._previous_cell
return (chunk.row_key == previous.row_key and
chunk.family_name == previous.family_name and
chunk.qualifier == previous.qualifier and
chunk.labels == previous.labels)
def _validate_chunk_row_in_progress(self, chunk):
"""Helper for :meth:`_validate_chunk`"""
assert self.state == self.ROW_IN_PROGRESS
self._validate_chunk_status(chunk)
if not chunk.HasField('commit_row') and not chunk.reset_row:
_raise_if(not chunk.timestamp_micros or not chunk.value)
_raise_if(chunk.row_key and
chunk.row_key != self._row.row_key)
_raise_if(chunk.HasField('family_name') and
not chunk.HasField('qualifier'))
previous = self._previous_cell
_raise_if(self._same_as_previous(chunk) and
chunk.timestamp_micros <= previous.timestamp_micros)
def _validate_chunk_cell_in_progress(self, chunk):
"""Helper for :meth:`_validate_chunk`"""
assert self.state == self.CELL_IN_PROGRESS
self._validate_chunk_status(chunk)
self._copy_from_current(chunk)
def _validate_chunk(self, chunk):
"""Helper for :meth:`consume_next`."""
if self.state == self.NEW_ROW:
self._validate_chunk_new_row(chunk)
if self.state == self.ROW_IN_PROGRESS:
self._validate_chunk_row_in_progress(chunk)
if self.state == self.CELL_IN_PROGRESS:
self._validate_chunk_cell_in_progress(chunk)
def _save_current_cell(self):
"""Helper for :meth:`consume_next`."""
row, cell = self._row, self._cell
family = row._cells.setdefault(cell.family_name, {})
qualified = family.setdefault(cell.qualifier, [])
complete = Cell.from_pb(self._cell)
qualified.append(complete)
self._cell, self._previous_cell = None, cell
def _copy_from_current(self, chunk):
"""Helper for :meth:`consume_next`."""
current = self._cell
if current is not None:
if not chunk.row_key:
chunk.row_key = current.row_key
if not chunk.HasField('family_name'):
chunk.family_name.value = current.family_name
if not chunk.HasField('qualifier'):
chunk.qualifier.value = current.qualifier
if not chunk.timestamp_micros:
chunk.timestamp_micros = current.timestamp_micros
if not chunk.labels:
chunk.labels.extend(current.labels)
def _copy_from_previous(self, cell):
"""Helper for :meth:`consume_next`."""
previous = self._previous_cell
if previous is not None:
if not cell.row_key:
cell.row_key = previous.row_key
if not cell.family_name:
cell.family_name = previous.family_name
if not cell.qualifier:
cell.qualifier = previous.qualifier
def _save_current_row(self):
"""Helper for :meth:`consume_next`."""
if self._cell:
self._save_current_cell()
self._rows[self._row.row_key] = self._row
self._row, self._previous_row = None, self._row
self._previous_cell = None
def _raise_if(predicate, *args):
"""Helper for validation methods."""
if predicate:
raise InvalidChunk(*args)
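# Illustrative usage sketch (not part of the original module): the
# ``response_iterator`` argument is an assumed ReadRows stream (e.g. the one
# returned by ``Table.read_rows``); it is wrapped and fully drained here.
def _example_consume_stream(response_iterator):
    """Hypothetical helper demonstrating :class:`PartialRowsData` usage."""
    partial_rows = PartialRowsData(response_iterator)
    # Pull responses until the stream raises StopIteration.
    partial_rows.consume_all()
    # Fully-processed rows, keyed by row key.
    return partial_rows.rows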

View file

@@ -0,0 +1,768 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filters for Google Cloud Bigtable Row classes."""
from gcloud._helpers import _microseconds_from_datetime
from gcloud._helpers import _to_bytes
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
class RowFilter(object):
"""Basic filter to apply to cells in a row.
These values can be combined via :class:`RowFilterChain`,
:class:`RowFilterUnion` and :class:`ConditionalRowFilter`.
.. note::
This class is a do-nothing base class for all row filters.
"""
def __ne__(self, other):
return not self.__eq__(other)
class _BoolFilter(RowFilter):
"""Row filter that uses a boolean flag.
:type flag: bool
:param flag: An indicator if a setting is turned on or off.
"""
def __init__(self, flag):
self.flag = flag
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.flag == self.flag
class SinkFilter(_BoolFilter):
"""Advanced row filter to skip parent filters.
:type flag: bool
:param flag: ADVANCED USE ONLY. Hook for introspection into the row filter.
Outputs all cells directly to the output of the read rather
than to any parent filter. Cannot be used within the
``predicate_filter``, ``true_filter``, or ``false_filter``
of a :class:`ConditionalRowFilter`.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(sink=self.flag)
class PassAllFilter(_BoolFilter):
"""Row filter equivalent to not filtering at all.
:type flag: bool
:param flag: Matches all cells, regardless of input. Functionally
equivalent to leaving ``filter`` unset, but included for
completeness.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(pass_all_filter=self.flag)
class BlockAllFilter(_BoolFilter):
"""Row filter that doesn't match any cells.
:type flag: bool
:param flag: Does not match any cells, regardless of input. Useful for
temporarily disabling just part of a filter.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(block_all_filter=self.flag)
class _RegexFilter(RowFilter):
"""Row filter that uses a regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
:type regex: bytes or str
:param regex: A regular expression (RE2) for some row filter.
"""
def __init__(self, regex):
self.regex = _to_bytes(regex)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.regex == self.regex
class RowKeyRegexFilter(_RegexFilter):
"""Row filter for a row key regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
.. note::
Special care must be taken with the expression used. Since
each of these properties can contain arbitrary bytes, the ``\\C``
escape sequence must be used if a true wildcard is desired. The ``.``
character will not match the new line character ``\\n``, which may be
present in a binary value.
:type regex: bytes
:param regex: A regular expression (RE2) to match cells from rows with row
keys that satisfy this regex. For a
``CheckAndMutateRowRequest``, this filter is unnecessary
since the row key is already specified.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex)
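# Illustrative usage sketch (not part of the original module): building a
# row-key filter from an RE2 pattern; the pattern itself is a made-up example.
def _example_row_key_regex_filter():
    """Hypothetical helper building a :class:`RowKeyRegexFilter` protobuf."""
    # Matches row keys such as b'user-123'; remember the note above about
    # ``\C`` when a wildcard must also match newline bytes.
    return RowKeyRegexFilter(b'user-\\d+').to_pb()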
class RowSampleFilter(RowFilter):
"""Matches all cells from a row with probability p.
:type sample: float
:param sample: The probability of matching a cell (must be in the
interval ``[0, 1]``).
"""
def __init__(self, sample):
self.sample = sample
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.sample == self.sample
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(row_sample_filter=self.sample)
class FamilyNameRegexFilter(_RegexFilter):
"""Row filter for a family name regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
:type regex: str
:param regex: A regular expression (RE2) to match cells from columns in a
given column family. For technical reasons, the regex must
not contain the ``':'`` character, even if it is not being
used as a literal.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex)
class ColumnQualifierRegexFilter(_RegexFilter):
"""Row filter for a column qualifier regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
.. note::
Special care must be taken with the expression used. Since
each of these properties can contain arbitrary bytes, the ``\\C``
escape sequence must be used if a true wildcard is desired. The ``.``
character will not match the new line character ``\\n``, which may be
present in a binary value.
:type regex: bytes
:param regex: A regular expression (RE2) to match cells from column that
match this regex (irrespective of column family).
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex)
class TimestampRange(object):
"""Range of time with inclusive lower and exclusive upper bounds.
:type start: :class:`datetime.datetime`
:param start: (Optional) The (inclusive) lower bound of the timestamp
range. If omitted, defaults to Unix epoch.
:type end: :class:`datetime.datetime`
:param end: (Optional) The (exclusive) upper bound of the timestamp
range. If omitted, no upper bound is used.
"""
def __init__(self, start=None, end=None):
self.start = start
self.end = end
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.start == self.start and
other.end == self.end)
def __ne__(self, other):
return not self.__eq__(other)
def to_pb(self):
"""Converts the :class:`TimestampRange` to a protobuf.
:rtype: :class:`.data_v2_pb2.TimestampRange`
:returns: The converted current object.
"""
timestamp_range_kwargs = {}
if self.start is not None:
timestamp_range_kwargs['start_timestamp_micros'] = (
_microseconds_from_datetime(self.start))
if self.end is not None:
timestamp_range_kwargs['end_timestamp_micros'] = (
_microseconds_from_datetime(self.end))
return data_v2_pb2.TimestampRange(**timestamp_range_kwargs)
class TimestampRangeFilter(RowFilter):
"""Row filter that limits cells to a range of time.
:type range_: :class:`TimestampRange`
:param range_: Range of time that cells should match against.
"""
def __init__(self, range_):
self.range_ = range_
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.range_ == self.range_
def to_pb(self):
"""Converts the row filter to a protobuf.
First converts the ``range_`` on the current object to a protobuf and
then uses it in the ``timestamp_range_filter`` field.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(
timestamp_range_filter=self.range_.to_pb())
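# Illustrative usage sketch (not part of the original module): ``start`` and
# ``end`` are assumed ``datetime.datetime`` values supplied by the caller.
def _example_timestamp_range_filter(start, end):
    """Hypothetical helper building a :class:`TimestampRangeFilter` protobuf."""
    time_range = TimestampRange(start=start, end=end)
    # The filter keeps cells whose timestamps fall in [start, end).
    return TimestampRangeFilter(time_range).to_pb()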
class ColumnRangeFilter(RowFilter):
"""A row filter to restrict to a range of columns.
Both the start and end column can be included or excluded in the range.
By default, we include them both, but this can be changed with optional
flags.
:type column_family_id: str
:param column_family_id: The column family that contains the columns. Must
be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type start_column: bytes
:param start_column: The start of the range of columns. If no value is
used, the backend applies no lower bound to the
values.
:type end_column: bytes
:param end_column: The end of the range of columns. If no value is used,
the backend applies no upper bound to the values.
:type inclusive_start: bool
:param inclusive_start: Boolean indicating if the start column should be
included in the range (or excluded). Defaults
to :data:`True` if ``start_column`` is passed and
no ``inclusive_start`` was given.
:type inclusive_end: bool
:param inclusive_end: Boolean indicating if the end column should be
included in the range (or excluded). Defaults
to :data:`True` if ``end_column`` is passed and
no ``inclusive_end`` was given.
:raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start``
is set but no ``start_column`` is given or if ``inclusive_end``
is set but no ``end_column`` is given
"""
def __init__(self, column_family_id, start_column=None, end_column=None,
inclusive_start=None, inclusive_end=None):
self.column_family_id = column_family_id
if inclusive_start is None:
inclusive_start = True
elif start_column is None:
raise ValueError('Inclusive start was specified but no '
'start column was given.')
self.start_column = start_column
self.inclusive_start = inclusive_start
if inclusive_end is None:
inclusive_end = True
elif end_column is None:
raise ValueError('Inclusive end was specified but no '
'end column was given.')
self.end_column = end_column
self.inclusive_end = inclusive_end
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.column_family_id == self.column_family_id and
other.start_column == self.start_column and
other.end_column == self.end_column and
other.inclusive_start == self.inclusive_start and
other.inclusive_end == self.inclusive_end)
def to_pb(self):
"""Converts the row filter to a protobuf.
First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it
in the ``column_range_filter`` field.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
column_range_kwargs = {'family_name': self.column_family_id}
if self.start_column is not None:
if self.inclusive_start:
key = 'start_qualifier_closed'
else:
key = 'start_qualifier_open'
column_range_kwargs[key] = _to_bytes(self.start_column)
if self.end_column is not None:
if self.inclusive_end:
key = 'end_qualifier_closed'
else:
key = 'end_qualifier_open'
column_range_kwargs[key] = _to_bytes(self.end_column)
column_range = data_v2_pb2.ColumnRange(**column_range_kwargs)
return data_v2_pb2.RowFilter(column_range_filter=column_range)
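# Illustrative usage sketch (not part of the original module): the family and
# column names are made-up; keeps columns from b'col-a' (inclusive) up to but
# not including b'col-z' within one column family.
def _example_column_range_filter():
    """Hypothetical helper building a :class:`ColumnRangeFilter` protobuf."""
    row_filter = ColumnRangeFilter(
        u'fam', start_column=b'col-a', end_column=b'col-z',
        inclusive_end=False)
    return row_filter.to_pb()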
class ValueRegexFilter(_RegexFilter):
"""Row filter for a value regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
.. note::
Special care must be taken with the expression used. Since
each of these properties can contain arbitrary bytes, the ``\\C``
escape sequence must be used if a true wildcard is desired. The ``.``
character will not match the new line character ``\\n``, which may be
present in a binary value.
:type regex: bytes
:param regex: A regular expression (RE2) to match cells with values that
match this regex.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(value_regex_filter=self.regex)
class ValueRangeFilter(RowFilter):
"""A range of values to restrict to in a row filter.
Will only match cells that have values in this range.
Both the start and end value can be included or excluded in the range.
By default, we include them both, but this can be changed with optional
flags.
:type start_value: bytes
:param start_value: The start of the range of values. If no value is used,
the backend applies no lower bound to the values.
:type end_value: bytes
:param end_value: The end of the range of values. If no value is used,
the backend applies no upper bound to the values.
:type inclusive_start: bool
:param inclusive_start: Boolean indicating if the start value should be
included in the range (or excluded). Defaults
to :data:`True` if ``start_value`` is passed and
no ``inclusive_start`` was given.
:type inclusive_end: bool
:param inclusive_end: Boolean indicating if the end value should be
included in the range (or excluded). Defaults
to :data:`True` if ``end_value`` is passed and
no ``inclusive_end`` was given.
:raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start``
is set but no ``start_value`` is given or if ``inclusive_end``
is set but no ``end_value`` is given
"""
def __init__(self, start_value=None, end_value=None,
inclusive_start=None, inclusive_end=None):
if inclusive_start is None:
inclusive_start = True
elif start_value is None:
raise ValueError('Inclusive start was specified but no '
'start value was given.')
self.start_value = start_value
self.inclusive_start = inclusive_start
if inclusive_end is None:
inclusive_end = True
elif end_value is None:
raise ValueError('Inclusive end was specified but no '
'end value was given.')
self.end_value = end_value
self.inclusive_end = inclusive_end
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.start_value == self.start_value and
other.end_value == self.end_value and
other.inclusive_start == self.inclusive_start and
other.inclusive_end == self.inclusive_end)
def to_pb(self):
"""Converts the row filter to a protobuf.
First converts to a :class:`.data_v2_pb2.ValueRange` and then uses
it to create a row filter protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
value_range_kwargs = {}
if self.start_value is not None:
if self.inclusive_start:
key = 'start_value_closed'
else:
key = 'start_value_open'
value_range_kwargs[key] = _to_bytes(self.start_value)
if self.end_value is not None:
if self.inclusive_end:
key = 'end_value_closed'
else:
key = 'end_value_open'
value_range_kwargs[key] = _to_bytes(self.end_value)
value_range = data_v2_pb2.ValueRange(**value_range_kwargs)
return data_v2_pb2.RowFilter(value_range_filter=value_range)
class _CellCountFilter(RowFilter):
"""Row filter that uses an integer count of cells.
The cell count is used as an offset or a limit for the number
of results returned.
:type num_cells: int
:param num_cells: An integer count / offset / limit.
"""
def __init__(self, num_cells):
self.num_cells = num_cells
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.num_cells == self.num_cells
class CellsRowOffsetFilter(_CellCountFilter):
"""Row filter to skip cells in a row.
:type num_cells: int
:param num_cells: Skips the first N cells of the row.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(
cells_per_row_offset_filter=self.num_cells)
class CellsRowLimitFilter(_CellCountFilter):
"""Row filter to limit cells in a row.
:type num_cells: int
:param num_cells: Matches only the first N cells of the row.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells)
class CellsColumnLimitFilter(_CellCountFilter):
"""Row filter to limit cells in a column.
:type num_cells: int
:param num_cells: Matches only the most recent N cells within each column.
This filters a (family name, column) pair, based on
timestamps of each cell.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(
cells_per_column_limit_filter=self.num_cells)
class StripValueTransformerFilter(_BoolFilter):
"""Row filter that transforms cells into empty string (0 bytes).
:type flag: bool
:param flag: If :data:`True`, replaces each cell's value with the empty
string. As the name indicates, this is more useful as a
transformer than a generic query / filter.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(strip_value_transformer=self.flag)
class ApplyLabelFilter(RowFilter):
"""Filter to apply labels to cells.
Intended to be used as an intermediate filter on a pre-existing filtered
result set. This way if two sets are combined, the label can tell where
the cell(s) originated. This allows the client to determine which results
were produced from which part of the filter.
.. note::
Due to a technical limitation of the backend, it is not currently
possible to apply multiple labels to a cell.
:type label: str
:param label: Label to apply to cells in the output row. Values must be
at most 15 characters long, and match the pattern
``[a-z0-9\\-]+``.
"""
def __init__(self, label):
self.label = label
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.label == self.label
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(apply_label_transformer=self.label)
class _FilterCombination(RowFilter):
"""Chain of row filters.
Sends rows through several filters in sequence. The filters are "chained"
together to process a row. After the first filter is applied, the second
is applied to the filtered output and so on for subsequent filters.
:type filters: list
:param filters: List of :class:`RowFilter`
"""
def __init__(self, filters=None):
if filters is None:
filters = []
self.filters = filters
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.filters == self.filters
class RowFilterChain(_FilterCombination):
"""Chain of row filters.
Sends rows through several filters in sequence. The filters are "chained"
together to process a row. After the first filter is applied, the second
is applied to the filtered output and so on for subsequent filters.
:type filters: list
:param filters: List of :class:`RowFilter`
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
chain = data_v2_pb2.RowFilter.Chain(
filters=[row_filter.to_pb() for row_filter in self.filters])
return data_v2_pb2.RowFilter(chain=chain)
class RowFilterUnion(_FilterCombination):
"""Union of row filters.
Sends rows through several filters simultaneously, then
merges / interleaves all the filtered results together.
If multiple cells are produced with the same column and timestamp,
they will all appear in the output row in an unspecified mutual order.
:type filters: list
:param filters: List of :class:`RowFilter`
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
interleave = data_v2_pb2.RowFilter.Interleave(
filters=[row_filter.to_pb() for row_filter in self.filters])
return data_v2_pb2.RowFilter(interleave=interleave)
class ConditionalRowFilter(RowFilter):
"""Conditional row filter which exhibits ternary behavior.
Executes one of two filters based on another filter. If the ``base_filter``
returns any cells in the row, then ``true_filter`` is executed. If not,
then ``false_filter`` is executed.
.. note::
The ``base_filter`` does not execute atomically with the true and false
filters, which may lead to inconsistent or unexpected results.
Additionally, executing a :class:`ConditionalRowFilter` has poor
performance on the server, especially when ``false_filter`` is set.
:type base_filter: :class:`RowFilter`
:param base_filter: The filter to condition on before executing the
true/false filters.
:type true_filter: :class:`RowFilter`
:param true_filter: (Optional) The filter to execute if there are any cells
matching ``base_filter``. If not provided, no results
will be returned in the true case.
:type false_filter: :class:`RowFilter`
:param false_filter: (Optional) The filter to execute if there are no cells
matching ``base_filter``. If not provided, no results
will be returned in the false case.
"""
def __init__(self, base_filter, true_filter=None, false_filter=None):
self.base_filter = base_filter
self.true_filter = true_filter
self.false_filter = false_filter
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.base_filter == self.base_filter and
other.true_filter == self.true_filter and
other.false_filter == self.false_filter)
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
condition_kwargs = {'predicate_filter': self.base_filter.to_pb()}
if self.true_filter is not None:
condition_kwargs['true_filter'] = self.true_filter.to_pb()
if self.false_filter is not None:
condition_kwargs['false_filter'] = self.false_filter.to_pb()
condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs)
return data_v2_pb2.RowFilter(condition=condition)
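# Illustrative usage sketch (not part of the original module): composing the
# filters defined above; the particular combination is made-up. A chain
# narrows results step by step, while a conditional filter picks one of two
# branches based on a predicate.
def _example_composed_filters():
    """Hypothetical helper combining several row filters."""
    newest_without_values = RowFilterChain(filters=[
        CellsColumnLimitFilter(1),          # keep only the newest cell
        StripValueTransformerFilter(True),  # drop values, keep metadata
    ])
    conditional = ConditionalRowFilter(
        base_filter=RowSampleFilter(0.5),
        true_filter=newest_without_values,
        false_filter=PassAllFilter(True),
    )
    return conditional.to_pb()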

View file

@@ -0,0 +1,379 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User friendly container for Google Cloud Bigtable Table."""
from gcloud._helpers import _to_bytes
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as data_messages_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2)
from gcloud.bigtable.column_family import _gc_rule_from_pb
from gcloud.bigtable.column_family import ColumnFamily
from gcloud.bigtable.row import AppendRow
from gcloud.bigtable.row import ConditionalRow
from gcloud.bigtable.row import DirectRow
from gcloud.bigtable.row_data import PartialRowsData
class Table(object):
"""Representation of a Google Cloud Bigtable Table.
.. note::
We don't define any properties on a table other than the name. As
the proto says, in a request:
The ``name`` field of the Table and all of its ColumnFamilies must
be left blank, and will be populated in the response.
This leaves only the ``current_operation`` and ``granularity``
fields. The ``current_operation`` is only used for responses while
``granularity`` is an enum with only one value.
We can use a :class:`Table` to:
* :meth:`create` the table
* :meth:`delete` the table
* :meth:`list_column_families` in the table
:type table_id: str
:param table_id: The ID of the table.
:type instance: :class:`Instance <.instance.Instance>`
:param instance: The instance that owns the table.
"""
def __init__(self, table_id, instance):
self.table_id = table_id
self._instance = instance
@property
def name(self):
"""Table name used in requests.
.. note::
This property will not change if ``table_id`` does not, but the
return value is not cached.
The table name is of the form
``"projects/../zones/../clusters/../tables/{table_id}"``
:rtype: str
:returns: The table name.
"""
return self._instance.name + '/tables/' + self.table_id
def column_family(self, column_family_id, gc_rule=None):
"""Factory to create a column family associated with this table.
:type column_family_id: str
:param column_family_id: The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type gc_rule: :class:`.GarbageCollectionRule`
:param gc_rule: (Optional) The garbage collection settings for this
column family.
:rtype: :class:`.ColumnFamily`
:returns: A column family owned by this table.
"""
return ColumnFamily(column_family_id, self, gc_rule=gc_rule)
def row(self, row_key, filter_=None, append=False):
"""Factory to create a row associated with this table.
.. warning::
At most one of ``filter_`` and ``append`` can be used in a
:class:`Row`.
:type row_key: bytes
:param row_key: The key for the row being created.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) Filter to be used for conditional mutations.
See :class:`.DirectRow` for more details.
:type append: bool
:param append: (Optional) Flag to determine if the row should be used
for append mutations.
:rtype: :class:`.DirectRow`
:returns: A row owned by this table.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``filter_`` and ``append`` are used.
"""
if append and filter_ is not None:
raise ValueError('At most one of filter_ and append can be set')
if append:
return AppendRow(row_key, self)
elif filter_ is not None:
return ConditionalRow(row_key, self, filter_=filter_)
else:
return DirectRow(row_key, self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.table_id == self.table_id and
other._instance == self._instance)
def __ne__(self, other):
return not self.__eq__(other)
def create(self, initial_split_keys=None):
"""Creates this table.
.. note::
Though a :class:`._generated_v2.table_pb2.Table` is also
allowed (as the ``table`` property) in a create table request, we
do not support it in this method. As mentioned in the
:class:`Table` docstring, the name is the only useful property in
the table proto.
.. note::
A create request returns a
:class:`._generated_v2.table_pb2.Table` but we don't use
this response. The proto definition allows for the inclusion of a
``current_operation`` in the response, but it does not appear that
the Cloud Bigtable API returns any operation.
:type initial_split_keys: list
:param initial_split_keys: (Optional) List of row keys that will be
used to initially split the table into
several tablets (Tablets are similar to
HBase regions). Given two split keys,
``"s1"`` and ``"s2"``, three tablets will be
created, spanning the key ranges:
``[, s1)``, ``[s1, s2)``, ``[s2, )``.
"""
split_pb = table_admin_messages_v2_pb2.CreateTableRequest.Split
if initial_split_keys is not None:
initial_split_keys = [
split_pb(key=key) for key in initial_split_keys]
request_pb = table_admin_messages_v2_pb2.CreateTableRequest(
initial_splits=initial_split_keys or [],
parent=self._instance.name,
table_id=self.table_id,
)
client = self._instance._client
# We expect a `._generated_v2.table_pb2.Table`
client._table_stub.CreateTable(request_pb, client.timeout_seconds)
def delete(self):
"""Delete this table."""
request_pb = table_admin_messages_v2_pb2.DeleteTableRequest(
name=self.name)
client = self._instance._client
# We expect a `google.protobuf.empty_pb2.Empty`
client._table_stub.DeleteTable(request_pb, client.timeout_seconds)
def list_column_families(self):
"""List the column families owned by this table.
:rtype: dict
:returns: Dictionary of column families attached to this table. Keys
are strings (column family names) and values are
:class:`.ColumnFamily` instances.
:raises: :class:`ValueError <exceptions.ValueError>` if the column
family name from the response does not agree with the computed
name from the column family ID.
"""
request_pb = table_admin_messages_v2_pb2.GetTableRequest(
name=self.name)
client = self._instance._client
# We expect a `._generated_v2.table_pb2.Table`
table_pb = client._table_stub.GetTable(request_pb,
client.timeout_seconds)
result = {}
for column_family_id, value_pb in table_pb.column_families.items():
gc_rule = _gc_rule_from_pb(value_pb.gc_rule)
column_family = self.column_family(column_family_id,
gc_rule=gc_rule)
result[column_family_id] = column_family
return result
def read_row(self, row_key, filter_=None):
"""Read a single row from this table.
:type row_key: bytes
:param row_key: The key of the row to read from.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
row. If unset, returns the entire row.
:rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>`
:returns: The contents of the row if any chunks were returned in
the response, otherwise :data:`None`.
:raises: :class:`ValueError <exceptions.ValueError>` if a commit row
chunk is never encountered.
"""
request_pb = _create_row_request(self.name, row_key=row_key,
filter_=filter_)
client = self._instance._client
response_iterator = client._data_stub.ReadRows(request_pb,
client.timeout_seconds)
rows_data = PartialRowsData(response_iterator)
rows_data.consume_all()
if rows_data.state not in (rows_data.NEW_ROW, rows_data.START):
raise ValueError('The row remains partial / is not committed.')
if len(rows_data.rows) == 0:
return None
return rows_data.rows[row_key]
def read_rows(self, start_key=None, end_key=None, limit=None,
filter_=None):
"""Read rows from this table.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads every column in
each row.
:rtype: :class:`.PartialRowsData`
:returns: A :class:`.PartialRowsData` convenience wrapper for consuming
the streamed results.
"""
request_pb = _create_row_request(
self.name, start_key=start_key, end_key=end_key, filter_=filter_,
limit=limit)
client = self._instance._client
response_iterator = client._data_stub.ReadRows(request_pb,
client.timeout_seconds)
# We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse`
return PartialRowsData(response_iterator)
def sample_row_keys(self):
"""Read a sample of row keys in the table.
The returned row keys will delimit contiguous sections of the table of
approximately equal size, which can be used to break up the data for
distributed tasks like mapreduces.
Each element in the iterator is a SampleRowKeys response and has the
properties ``offset_bytes`` and ``row_key``. The elements occur in sorted
order. The table might have contents before the first row key in the
list and after the last one, but a key containing the empty string
indicates "end of table" and will be the last response given, if
present.
.. note::
Row keys in this list may not have ever been written to or read
from, and users should therefore not make any assumptions about the
row key structure that are specific to their use case.
The ``offset_bytes`` field on a response indicates the approximate
total storage space used by all rows in the table which precede
``row_key``. Buffering the contents of all rows between two subsequent
samples would require space roughly equal to the difference in their
``offset_bytes`` fields.
:rtype: :class:`grpc.framework.alpha._reexport._CancellableIterator`
:returns: A cancel-able iterator. Can be consumed by calling ``next()``
or by casting to a :class:`list` and can be cancelled by
calling ``cancel()``.
"""
request_pb = data_messages_v2_pb2.SampleRowKeysRequest(
table_name=self.name)
client = self._instance._client
response_iterator = client._data_stub.SampleRowKeys(
request_pb, client.timeout_seconds)
return response_iterator
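# Illustrative usage sketch (not part of the original module): ``instance`` is
# an assumed, already-started gcloud.bigtable Instance; the table ID and row
# keys are made-up values.
def _example_table_reads(instance):
    """Hypothetical helper demonstrating the Table read APIs."""
    table = Table('table-id', instance)
    # Single-row read; returns None if the row does not exist.
    row_data = table.read_row(b'row-key')
    # Streaming range read; consume_all() drains the stream into .rows.
    partial_rows = table.read_rows(start_key=b'row-a', end_key=b'row-z',
                                   limit=10)
    partial_rows.consume_all()
    return row_data, partial_rows.rows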
def _create_row_request(table_name, row_key=None, start_key=None, end_key=None,
filter_=None, limit=None):
"""Creates a request to read rows in a table.
:type table_name: str
:param table_name: The name of the table to read from.
:type row_key: bytes
:param row_key: (Optional) The key of a specific row to read from.
:type start_key: bytes
:param start_key: (Optional) The beginning of a range of row keys to
read from. The range will include ``start_key``. If
left empty, will be interpreted as the empty string.
:type end_key: bytes
:param end_key: (Optional) The end of a range of row keys to read from.
The range will not include ``end_key``. If left empty,
will be interpreted as an infinite string.
:type filter_: :class:`.RowFilter`
:param filter_: (Optional) The filter to apply to the contents of the
specified row(s). If unset, reads the entire table.
:type limit: int
:param limit: (Optional) The read will terminate after committing to N
rows' worth of results. The default (zero) is to return
all results.
:rtype: :class:`data_messages_v2_pb2.ReadRowsRequest`
:returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs.
:raises: :class:`ValueError <exceptions.ValueError>` if both
``row_key`` and one of ``start_key`` and ``end_key`` are set
"""
request_kwargs = {'table_name': table_name}
if (row_key is not None and
(start_key is not None or end_key is not None)):
raise ValueError('Row key and row range cannot be '
'set simultaneously')
range_kwargs = {}
if start_key is not None or end_key is not None:
if start_key is not None:
range_kwargs['start_key_closed'] = _to_bytes(start_key)
if end_key is not None:
range_kwargs['end_key_open'] = _to_bytes(end_key)
if filter_ is not None:
request_kwargs['filter'] = filter_.to_pb()
if limit is not None:
request_kwargs['rows_limit'] = limit
message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs)
if row_key is not None:
message.rows.row_keys.append(_to_bytes(row_key))
if range_kwargs:
message.rows.row_ranges.add(**range_kwargs)
return message
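# Illustrative sketch (not part of the original module): shows how the helper
# above maps keyword arguments onto a ``ReadRowsRequest``; ``table_name`` is
# an assumed fully-qualified table name string.
def _example_read_rows_request(table_name):
    """Hypothetical helper exercising :func:`_create_row_request`."""
    # A bounded range scan limited to 100 rows' worth of results.
    return _create_row_request(
        table_name, start_key=b'row-aaa', end_key=b'row-zzz', limit=100)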

View file

@@ -0,0 +1,784 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestClient(unittest2.TestCase):
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
TIMEOUT_SECONDS = 80
USER_AGENT = 'you-sir-age-int'
def _getTargetClass(self):
from gcloud.bigtable.client import Client
return Client
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _constructor_test_helper(self, expected_scopes, creds,
read_only=False, admin=False,
user_agent=None, timeout_seconds=None,
expected_creds=None):
from gcloud.bigtable import client as MUT
user_agent = user_agent or MUT.DEFAULT_USER_AGENT
timeout_seconds = timeout_seconds or MUT.DEFAULT_TIMEOUT_SECONDS
client = self._makeOne(project=self.PROJECT, credentials=creds,
read_only=read_only, admin=admin,
user_agent=user_agent,
timeout_seconds=timeout_seconds)
expected_creds = expected_creds or creds
self.assertTrue(client._credentials is expected_creds)
if expected_scopes is not None:
self.assertEqual(client._credentials.scopes, expected_scopes)
self.assertEqual(client.project, self.PROJECT)
self.assertEqual(client.timeout_seconds, timeout_seconds)
self.assertEqual(client.user_agent, user_agent)
# Check stubs are set (but None)
self.assertEqual(client._data_stub_internal, None)
self.assertEqual(client._instance_stub_internal, None)
self.assertEqual(client._operations_stub_internal, None)
self.assertEqual(client._table_stub_internal, None)
def test_constructor_default_scopes(self):
from gcloud.bigtable import client as MUT
expected_scopes = [MUT.DATA_SCOPE]
creds = _Credentials()
self._constructor_test_helper(expected_scopes, creds)
def test_constructor_custom_user_agent_and_timeout(self):
from gcloud.bigtable import client as MUT
CUSTOM_TIMEOUT_SECONDS = 1337
CUSTOM_USER_AGENT = 'custom-application'
expected_scopes = [MUT.DATA_SCOPE]
creds = _Credentials()
self._constructor_test_helper(expected_scopes, creds,
user_agent=CUSTOM_USER_AGENT,
timeout_seconds=CUSTOM_TIMEOUT_SECONDS)
def test_constructor_with_admin(self):
from gcloud.bigtable import client as MUT
expected_scopes = [MUT.DATA_SCOPE, MUT.ADMIN_SCOPE]
creds = _Credentials()
self._constructor_test_helper(expected_scopes, creds, admin=True)
def test_constructor_with_read_only(self):
from gcloud.bigtable import client as MUT
expected_scopes = [MUT.READ_ONLY_SCOPE]
creds = _Credentials()
self._constructor_test_helper(expected_scopes, creds, read_only=True)
def test_constructor_both_admin_and_read_only(self):
creds = _Credentials()
with self.assertRaises(ValueError):
self._constructor_test_helper([], creds, admin=True,
read_only=True)
def test_constructor_implicit_credentials(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import client as MUT
creds = _Credentials()
expected_scopes = [MUT.DATA_SCOPE]
def mock_get_credentials():
return creds
with _Monkey(MUT, get_credentials=mock_get_credentials):
self._constructor_test_helper(expected_scopes, None,
expected_creds=creds)
def test_constructor_credentials_wo_create_scoped(self):
creds = object()
expected_scopes = None
self._constructor_test_helper(expected_scopes, creds)
def _context_manager_helper(self):
credentials = _Credentials()
client = self._makeOne(project=self.PROJECT, credentials=credentials)
def mock_start():
client._data_stub_internal = object()
client.start = mock_start
def mock_stop():
client._data_stub_internal = None
client.stop = mock_stop
return client
def test_context_manager(self):
client = self._context_manager_helper()
self.assertFalse(client.is_started())
with client:
self.assertTrue(client.is_started())
self.assertFalse(client.is_started())
def test_context_manager_as_keyword(self):
with self._context_manager_helper() as client:
self.assertIsNotNone(client)
def test_context_manager_with_exception(self):
client = self._context_manager_helper()
self.assertFalse(client.is_started())
class DummyException(Exception):
pass
try:
with client:
self.assertTrue(client.is_started())
raise DummyException()
except DummyException:
pass
self.assertFalse(client.is_started())
def _copy_test_helper(self, read_only=False, admin=False):
credentials = _Credentials('value')
client = self._makeOne(
project=self.PROJECT,
credentials=credentials,
read_only=read_only,
admin=admin,
timeout_seconds=self.TIMEOUT_SECONDS,
user_agent=self.USER_AGENT)
# Put some fake stubs in place so that we can verify they
# don't get copied.
client._data_stub_internal = object()
client._instance_stub_internal = object()
client._operations_stub_internal = object()
client._table_stub_internal = object()
new_client = client.copy()
self.assertEqual(new_client._admin, client._admin)
self.assertEqual(new_client._credentials, client._credentials)
self.assertEqual(new_client.project, client.project)
self.assertEqual(new_client.user_agent, client.user_agent)
self.assertEqual(new_client.timeout_seconds, client.timeout_seconds)
# Make sure stubs are not preserved.
self.assertEqual(new_client._data_stub_internal, None)
self.assertEqual(new_client._instance_stub_internal, None)
self.assertEqual(new_client._operations_stub_internal, None)
self.assertEqual(new_client._table_stub_internal, None)
def test_copy(self):
self._copy_test_helper()
def test_copy_admin(self):
self._copy_test_helper(admin=True)
def test_copy_read_only(self):
self._copy_test_helper(read_only=True)
def test_credentials_getter(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
self.assertTrue(client.credentials is credentials)
def test_project_name_property(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
project_name = 'projects/' + project
self.assertEqual(client.project_name, project_name)
def test_data_stub_getter(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
client._data_stub_internal = object()
self.assertTrue(client._data_stub is client._data_stub_internal)
def test_data_stub_failure(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
with self.assertRaises(ValueError):
getattr(client, '_data_stub')
def test_instance_stub_getter(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=True)
client._instance_stub_internal = object()
self.assertTrue(
client._instance_stub is client._instance_stub_internal)
def test_instance_stub_non_admin_failure(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=False)
with self.assertRaises(ValueError):
getattr(client, '_instance_stub')
def test_instance_stub_unset_failure(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=True)
with self.assertRaises(ValueError):
getattr(client, '_instance_stub')
def test_operations_stub_getter(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=True)
client._operations_stub_internal = object()
self.assertTrue(client._operations_stub is
client._operations_stub_internal)
def test_operations_stub_non_admin_failure(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=False)
with self.assertRaises(ValueError):
getattr(client, '_operations_stub')
def test_operations_stub_unset_failure(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=True)
with self.assertRaises(ValueError):
getattr(client, '_operations_stub')
def test_table_stub_getter(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=True)
client._table_stub_internal = object()
self.assertTrue(client._table_stub is client._table_stub_internal)
def test_table_stub_non_admin_failure(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=False)
with self.assertRaises(ValueError):
getattr(client, '_table_stub')
def test_table_stub_unset_failure(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=True)
with self.assertRaises(ValueError):
getattr(client, '_table_stub')
def test__make_data_stub(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import client as MUT
from gcloud.bigtable.client import DATA_API_HOST_V2
from gcloud.bigtable.client import DATA_API_PORT_V2
from gcloud.bigtable.client import DATA_STUB_FACTORY_V2
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
fake_stub = object()
make_stub_args = []
def mock_make_stub(*args):
make_stub_args.append(args)
return fake_stub
with _Monkey(MUT, _make_stub=mock_make_stub):
result = client._make_data_stub()
self.assertTrue(result is fake_stub)
self.assertEqual(make_stub_args, [
(
client,
DATA_STUB_FACTORY_V2,
DATA_API_HOST_V2,
DATA_API_PORT_V2,
),
])
def test__make_instance_stub(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import client as MUT
from gcloud.bigtable.client import INSTANCE_ADMIN_HOST_V2
from gcloud.bigtable.client import INSTANCE_ADMIN_PORT_V2
from gcloud.bigtable.client import INSTANCE_STUB_FACTORY_V2
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
fake_stub = object()
make_stub_args = []
def mock_make_stub(*args):
make_stub_args.append(args)
return fake_stub
with _Monkey(MUT, _make_stub=mock_make_stub):
result = client._make_instance_stub()
self.assertTrue(result is fake_stub)
self.assertEqual(make_stub_args, [
(
client,
INSTANCE_STUB_FACTORY_V2,
INSTANCE_ADMIN_HOST_V2,
INSTANCE_ADMIN_PORT_V2,
),
])
def test__make_operations_stub(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import client as MUT
from gcloud.bigtable.client import OPERATIONS_API_HOST_V2
from gcloud.bigtable.client import OPERATIONS_API_PORT_V2
from gcloud.bigtable.client import OPERATIONS_STUB_FACTORY_V2
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
fake_stub = object()
make_stub_args = []
def mock_make_stub(*args):
make_stub_args.append(args)
return fake_stub
with _Monkey(MUT, _make_stub=mock_make_stub):
result = client._make_operations_stub()
self.assertTrue(result is fake_stub)
self.assertEqual(make_stub_args, [
(
client,
OPERATIONS_STUB_FACTORY_V2,
OPERATIONS_API_HOST_V2,
OPERATIONS_API_PORT_V2,
),
])
def test__make_table_stub(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import client as MUT
from gcloud.bigtable.client import TABLE_ADMIN_HOST_V2
from gcloud.bigtable.client import TABLE_ADMIN_PORT_V2
from gcloud.bigtable.client import TABLE_STUB_FACTORY_V2
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
fake_stub = object()
make_stub_args = []
def mock_make_stub(*args):
make_stub_args.append(args)
return fake_stub
with _Monkey(MUT, _make_stub=mock_make_stub):
result = client._make_table_stub()
self.assertTrue(result is fake_stub)
self.assertEqual(make_stub_args, [
(
client,
TABLE_STUB_FACTORY_V2,
TABLE_ADMIN_HOST_V2,
TABLE_ADMIN_PORT_V2,
),
])
def test_is_started(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
self.assertFalse(client.is_started())
client._data_stub_internal = object()
self.assertTrue(client.is_started())
client._data_stub_internal = None
self.assertFalse(client.is_started())
def _start_method_helper(self, admin):
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import client as MUT
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=admin)
stub = _FakeStub()
make_stub_args = []
def mock_make_stub(*args):
make_stub_args.append(args)
return stub
with _Monkey(MUT, _make_stub=mock_make_stub):
client.start()
self.assertTrue(client._data_stub_internal is stub)
if admin:
self.assertTrue(client._instance_stub_internal is stub)
self.assertTrue(client._operations_stub_internal is stub)
self.assertTrue(client._table_stub_internal is stub)
self.assertEqual(stub._entered, 4)
self.assertEqual(len(make_stub_args), 4)
else:
self.assertTrue(client._instance_stub_internal is None)
self.assertTrue(client._operations_stub_internal is None)
self.assertTrue(client._table_stub_internal is None)
self.assertEqual(stub._entered, 1)
self.assertEqual(len(make_stub_args), 1)
self.assertEqual(stub._exited, [])
def test_start_non_admin(self):
self._start_method_helper(admin=False)
def test_start_with_admin(self):
self._start_method_helper(admin=True)
def test_start_while_started(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
client._data_stub_internal = data_stub = object()
self.assertTrue(client.is_started())
client.start()
# Make sure the stub did not change.
self.assertEqual(client._data_stub_internal, data_stub)
def _stop_method_helper(self, admin):
from gcloud.bigtable._testing import _FakeStub
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials,
admin=admin)
stub1 = _FakeStub()
stub2 = _FakeStub()
client._data_stub_internal = stub1
client._instance_stub_internal = stub2
client._operations_stub_internal = stub2
client._table_stub_internal = stub2
client.stop()
self.assertTrue(client._data_stub_internal is None)
self.assertTrue(client._instance_stub_internal is None)
self.assertTrue(client._operations_stub_internal is None)
self.assertTrue(client._table_stub_internal is None)
self.assertEqual(stub1._entered, 0)
self.assertEqual(stub2._entered, 0)
exc_none_triple = (None, None, None)
self.assertEqual(stub1._exited, [exc_none_triple])
if admin:
self.assertEqual(stub2._exited, [exc_none_triple] * 3)
else:
self.assertEqual(stub2._exited, [])
def test_stop_non_admin(self):
self._stop_method_helper(admin=False)
def test_stop_with_admin(self):
self._stop_method_helper(admin=True)
def test_stop_while_stopped(self):
credentials = _Credentials()
project = 'PROJECT'
client = self._makeOne(project=project, credentials=credentials)
self.assertFalse(client.is_started())
# This is a bit hacky. We set the instance stub protected value,
# since it isn't used in is_started(), and make sure that stop()
# doesn't reset this value to None.
client._instance_stub_internal = instance_stub = object()
client.stop()
# Make sure the instance stub did not change.
self.assertEqual(client._instance_stub_internal, instance_stub)
def test_instance_factory_defaults(self):
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
from gcloud.bigtable.instance import Instance
from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
credentials = _Credentials()
client = self._makeOne(project=PROJECT, credentials=credentials)
instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME)
self.assertTrue(isinstance(instance, Instance))
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertEqual(instance._cluster_location_id,
_EXISTING_INSTANCE_LOCATION_ID)
self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES)
self.assertTrue(instance._client is client)
def test_instance_factory_w_explicit_serve_nodes(self):
from gcloud.bigtable.instance import Instance
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
LOCATION_ID = 'locname'
SERVE_NODES = 5
credentials = _Credentials()
client = self._makeOne(project=PROJECT, credentials=credentials)
instance = client.instance(
INSTANCE_ID, display_name=DISPLAY_NAME,
location=LOCATION_ID, serve_nodes=SERVE_NODES)
self.assertTrue(isinstance(instance, Instance))
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertEqual(instance._cluster_location_id, LOCATION_ID)
self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES)
self.assertTrue(instance._client is client)
def test_list_instances(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from gcloud.bigtable._testing import _FakeStub
LOCATION = 'projects/' + self.PROJECT + '/locations/locname'
FAILED_LOCATION = 'FAILED'
INSTANCE_ID1 = 'instance-id1'
INSTANCE_ID2 = 'instance-id2'
INSTANCE_NAME1 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1)
INSTANCE_NAME2 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2)
credentials = _Credentials()
client = self._makeOne(
project=self.PROJECT,
credentials=credentials,
admin=True,
timeout_seconds=self.TIMEOUT_SECONDS,
)
# Create request_pb
request_pb = messages_v2_pb2.ListInstancesRequest(
parent='projects/' + self.PROJECT,
)
# Create response_pb
response_pb = messages_v2_pb2.ListInstancesResponse(
failed_locations=[
FAILED_LOCATION,
],
instances=[
data_v2_pb2.Instance(
name=INSTANCE_NAME1,
display_name=INSTANCE_NAME1,
),
data_v2_pb2.Instance(
name=INSTANCE_NAME2,
display_name=INSTANCE_NAME2,
),
],
)
# Patch the stub used by the API method.
client._instance_stub_internal = stub = _FakeStub(response_pb)
# Create expected_result.
failed_locations = [FAILED_LOCATION]
instances = [
client.instance(INSTANCE_ID1, LOCATION),
client.instance(INSTANCE_ID2, LOCATION),
]
expected_result = (instances, failed_locations)
# Perform the method and check the result.
result = client.list_instances()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ListInstances',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
class Test_MetadataPlugin(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.client import _MetadataPlugin
return _MetadataPlugin
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
from gcloud.bigtable.client import Client
from gcloud.bigtable.client import DATA_SCOPE
PROJECT = 'PROJECT'
USER_AGENT = 'USER_AGENT'
credentials = _Credentials()
client = Client(project=PROJECT, credentials=credentials,
user_agent=USER_AGENT)
transformer = self._makeOne(client)
self.assertTrue(transformer._credentials is credentials)
self.assertEqual(transformer._user_agent, USER_AGENT)
self.assertEqual(credentials.scopes, [DATA_SCOPE])
def test___call__(self):
from gcloud.bigtable.client import Client
from gcloud.bigtable.client import DATA_SCOPE
from gcloud.bigtable.client import DEFAULT_USER_AGENT
access_token_expected = 'FOOBARBAZ'
credentials = _Credentials(access_token=access_token_expected)
project = 'PROJECT'
client = Client(project=project, credentials=credentials)
callback_args = []
def callback(*args):
callback_args.append(args)
transformer = self._makeOne(client)
result = transformer(None, callback)
cb_headers = [
('Authorization', 'Bearer ' + access_token_expected),
('User-agent', DEFAULT_USER_AGENT),
]
self.assertEqual(result, None)
self.assertEqual(callback_args, [(cb_headers, None)])
self.assertEqual(credentials.scopes, [DATA_SCOPE])
self.assertEqual(len(credentials._tokens), 1)
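# NOTE: A hedged sketch of the plugin behaviour exercised above; the real
# ``gcloud.bigtable.client._MetadataPlugin`` may differ in details.
class _SketchMetadataPlugin(object):
    """Callable that injects auth and user-agent headers into gRPC calls."""

    def __init__(self, client):
        self._credentials = client.credentials
        self._user_agent = client.user_agent

    def __call__(self, unused_context, callback):
        # Fetch a fresh access token and hand the headers back to gRPC.
        access_token = self._credentials.get_access_token().access_token
        headers = [
            ('Authorization', 'Bearer ' + access_token),
            ('User-agent', self._user_agent),
        ]
        callback(headers, None)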
class Test__make_stub(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.client import _make_stub
return _make_stub(*args, **kwargs)
def test_it(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import client as MUT
mock_result = object()
stub_inputs = []
SSL_CREDS = object()
METADATA_CREDS = object()
COMPOSITE_CREDS = object()
CHANNEL = object()
class _ImplementationsModule(object):
def __init__(self):
self.ssl_channel_credentials_args = None
self.metadata_call_credentials_args = None
self.composite_channel_credentials_args = None
self.secure_channel_args = None
def ssl_channel_credentials(self, *args):
self.ssl_channel_credentials_args = args
return SSL_CREDS
def metadata_call_credentials(self, *args, **kwargs):
self.metadata_call_credentials_args = (args, kwargs)
return METADATA_CREDS
def composite_channel_credentials(self, *args):
self.composite_channel_credentials_args = args
return COMPOSITE_CREDS
def secure_channel(self, *args):
self.secure_channel_args = args
return CHANNEL
implementations_mod = _ImplementationsModule()
def mock_stub_factory(channel):
stub_inputs.append(channel)
return mock_result
metadata_plugin = object()
clients = []
def mock_plugin(client):
clients.append(client)
return metadata_plugin
host = 'HOST'
port = 1025
client = object()
with _Monkey(MUT, implementations=implementations_mod,
_MetadataPlugin=mock_plugin):
result = self._callFUT(client, mock_stub_factory, host, port)
self.assertTrue(result is mock_result)
self.assertEqual(stub_inputs, [CHANNEL])
self.assertEqual(clients, [client])
self.assertEqual(implementations_mod.ssl_channel_credentials_args,
(None, None, None))
self.assertEqual(implementations_mod.metadata_call_credentials_args,
((metadata_plugin,), {'name': 'google_creds'}))
self.assertEqual(
implementations_mod.composite_channel_credentials_args,
(SSL_CREDS, METADATA_CREDS))
self.assertEqual(implementations_mod.secure_channel_args,
(host, port, COMPOSITE_CREDS))
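# NOTE: A hedged sketch of channel/stub construction consistent with the
# assertions above; the real ``gcloud.bigtable.client._make_stub`` may
# differ in details.
def _sketch_make_stub(client, stub_factory, host, port):
    """Compose SSL and per-call metadata credentials into a secure channel."""
    from grpc.beta import implementations
    from gcloud.bigtable.client import _MetadataPlugin

    metadata_plugin = _MetadataPlugin(client)
    transport_creds = implementations.ssl_channel_credentials(None, None, None)
    call_creds = implementations.metadata_call_credentials(
        metadata_plugin, name='google_creds')
    channel_creds = implementations.composite_channel_credentials(
        transport_creds, call_creds)
    channel = implementations.secure_channel(host, port, channel_creds)
    return stub_factory(channel)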
class _Credentials(object):
scopes = None
def __init__(self, access_token=None):
self._access_token = access_token
self._tokens = []
def get_access_token(self):
from oauth2client.client import AccessTokenInfo
token = AccessTokenInfo(access_token=self._access_token,
expires_in=None)
self._tokens.append(token)
return token
def create_scoped(self, scope):
self.scopes = scope
return self
def __eq__(self, other):
return self._access_token == other._access_token

View file

@ -0,0 +1,643 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestOperation(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.cluster import Operation
return Operation
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _constructor_test_helper(self, cluster=None):
op_type = 'fake-op'
op_id = 8915
operation = self._makeOne(op_type, op_id, cluster=cluster)
self.assertEqual(operation.op_type, op_type)
self.assertEqual(operation.op_id, op_id)
self.assertEqual(operation._cluster, cluster)
self.assertFalse(operation._complete)
def test_constructor_defaults(self):
self._constructor_test_helper()
def test_constructor_explicit_cluster(self):
cluster = object()
self._constructor_test_helper(cluster=cluster)
def test___eq__(self):
op_type = 'fake-op'
op_id = 8915
cluster = object()
operation1 = self._makeOne(op_type, op_id, cluster=cluster)
operation2 = self._makeOne(op_type, op_id, cluster=cluster)
self.assertEqual(operation1, operation2)
def test___eq__type_differ(self):
operation1 = self._makeOne('foo', 123, None)
operation2 = object()
self.assertNotEqual(operation1, operation2)
def test___ne__same_value(self):
op_type = 'fake-op'
op_id = 8915
cluster = object()
operation1 = self._makeOne(op_type, op_id, cluster=cluster)
operation2 = self._makeOne(op_type, op_id, cluster=cluster)
comparison_val = (operation1 != operation2)
self.assertFalse(comparison_val)
def test___ne__(self):
operation1 = self._makeOne('foo', 123, None)
operation2 = self._makeOne('bar', 456, None)
self.assertNotEqual(operation1, operation2)
def test_finished_without_operation(self):
operation = self._makeOne(None, None, None)
operation._complete = True
with self.assertRaises(ValueError):
operation.finished()
def _finished_helper(self, done):
from google.longrunning import operations_pb2
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable.cluster import Cluster
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
CLUSTER_ID = 'cluster-id'
OP_TYPE = 'fake-op'
OP_ID = 789
timeout_seconds = 1
client = _Client(PROJECT, timeout_seconds=timeout_seconds)
instance = _Instance(INSTANCE_ID, client)
cluster = Cluster(CLUSTER_ID, instance)
operation = self._makeOne(OP_TYPE, OP_ID, cluster=cluster)
# Create request_pb
op_name = ('operations/projects/' + PROJECT +
'/instances/' + INSTANCE_ID +
'/clusters/' + CLUSTER_ID +
'/operations/%d' % (OP_ID,))
request_pb = operations_pb2.GetOperationRequest(name=op_name)
# Create response_pb
response_pb = operations_pb2.Operation(done=done)
# Patch the stub used by the API method.
client._operations_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = done
# Perform the method and check the result.
result = operation.finished()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'GetOperation',
(request_pb, timeout_seconds),
{},
)])
if done:
self.assertTrue(operation._complete)
else:
self.assertFalse(operation._complete)
def test_finished(self):
self._finished_helper(done=True)
def test_finished_not_done(self):
self._finished_helper(done=False)
class TestCluster(unittest2.TestCase):
PROJECT = 'project'
INSTANCE_ID = 'instance-id'
CLUSTER_ID = 'cluster-id'
CLUSTER_NAME = ('projects/' + PROJECT +
'/instances/' + INSTANCE_ID +
'/clusters/' + CLUSTER_ID)
TIMEOUT_SECONDS = 123
def _getTargetClass(self):
from gcloud.bigtable.cluster import Cluster
return Cluster
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertTrue(cluster._instance is instance)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
def test_constructor_non_default(self):
SERVE_NODES = 8
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertTrue(cluster._instance is instance)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
def test_copy(self):
SERVE_NODES = 8
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
new_cluster = cluster.copy()
# Make sure the instance was copied to a new object.
self.assertFalse(new_cluster._instance is instance)
self.assertEqual(new_cluster.serve_nodes, SERVE_NODES)
# Make sure the cluster itself was copied into a new, equal object.
self.assertFalse(cluster is new_cluster)
self.assertEqual(cluster, new_cluster)
def test__update_from_pb_success(self):
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
SERVE_NODES = 8
cluster_pb = _ClusterPB(
serve_nodes=SERVE_NODES,
)
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
cluster._update_from_pb(cluster_pb)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
def test__update_from_pb_no_serve_nodes(self):
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
cluster_pb = _ClusterPB()
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
with self.assertRaises(ValueError):
cluster._update_from_pb(cluster_pb)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
def test_from_pb_success(self):
SERVE_NODES = 331
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster_pb = _ClusterPB(
name=self.CLUSTER_NAME,
serve_nodes=SERVE_NODES,
)
klass = self._getTargetClass()
cluster = klass.from_pb(cluster_pb, instance)
self.assertTrue(isinstance(cluster, klass))
self.assertTrue(cluster._instance is instance)
self.assertEqual(cluster.cluster_id, self.CLUSTER_ID)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
def test_from_pb_bad_cluster_name(self):
BAD_CLUSTER_NAME = 'INCORRECT_FORMAT'
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME)
klass = self._getTargetClass()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
def test_from_pb_project_mismatch(self):
ALT_PROJECT = 'ALT_PROJECT'
client = _Client(ALT_PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
self.assertNotEqual(self.PROJECT, ALT_PROJECT)
cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)
klass = self._getTargetClass()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
def test_from_pb_instance_mismatch(self):
ALT_INSTANCE_ID = 'ALT_INSTANCE_ID'
client = _Client(self.PROJECT)
instance = _Instance(ALT_INSTANCE_ID, client)
self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID)
cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)
klass = self._getTargetClass()
with self.assertRaises(ValueError):
klass.from_pb(cluster_pb, instance)
def test_name_property(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance)
self.assertEqual(cluster.name, self.CLUSTER_NAME)
def test___eq__(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._makeOne(self.CLUSTER_ID, instance)
cluster2 = self._makeOne(self.CLUSTER_ID, instance)
self.assertEqual(cluster1, cluster2)
def test___eq__type_differ(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._makeOne(self.CLUSTER_ID, instance)
cluster2 = object()
self.assertNotEqual(cluster1, cluster2)
def test___ne__same_value(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._makeOne(self.CLUSTER_ID, instance)
cluster2 = self._makeOne(self.CLUSTER_ID, instance)
comparison_val = (cluster1 != cluster2)
self.assertFalse(comparison_val)
def test___ne__(self):
client = _Client(self.PROJECT)
instance = _Instance(self.INSTANCE_ID, client)
cluster1 = self._makeOne('cluster_id1', instance)
cluster2 = self._makeOne('cluster_id2', instance)
self.assertNotEqual(cluster1, cluster2)
def test_reload(self):
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
SERVE_NODES = 31
LOCATION = 'LOCATION'
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance)
# Create request_pb
request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME)
# Create response_pb
response_pb = _ClusterPB(
serve_nodes=SERVE_NODES,
location=LOCATION,
)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # reload() has no return value.
# Check Cluster optional config values before.
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
# Perform the method and check the result.
result = cluster.reload()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'GetCluster',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
# Check Cluster optional config values after.
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
self.assertEqual(cluster.location, LOCATION)
def test_create(self):
from google.longrunning import operations_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import cluster as MUT
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance)
# Create request_pb. Just a mock since we monkey patch
# _prepare_create_request
request_pb = object()
# Create response_pb
OP_ID = 5678
OP_NAME = (
'operations/projects/%s/instances/%s/clusters/%s/operations/%d' %
(self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID))
response_pb = operations_pb2.Operation(name=OP_NAME)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = MUT.Operation('create', OP_ID, cluster=cluster)
# Create the mocks.
prep_create_called = []
def mock_prep_create_req(cluster):
prep_create_called.append(cluster)
return request_pb
process_operation_called = []
def mock_process_operation(operation_pb):
process_operation_called.append(operation_pb)
return OP_ID
# Perform the method and check the result.
with _Monkey(MUT, _prepare_create_request=mock_prep_create_req,
_process_operation=mock_process_operation):
result = cluster.create()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'CreateCluster',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
self.assertEqual(prep_create_called, [cluster])
self.assertEqual(process_operation_called, [response_pb])
def test_update(self):
from google.longrunning import operations_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import cluster as MUT
SERVE_NODES = 81
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
# Create request_pb
request_pb = _ClusterPB(
name=self.CLUSTER_NAME,
serve_nodes=SERVE_NODES,
)
# Create response_pb
response_pb = operations_pb2.Operation()
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
OP_ID = 5678
expected_result = MUT.Operation('update', OP_ID, cluster=cluster)
# Create mocks
process_operation_called = []
def mock_process_operation(operation_pb):
process_operation_called.append(operation_pb)
return OP_ID
# Perform the method and check the result.
with _Monkey(MUT, _process_operation=mock_process_operation):
result = cluster.update()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'UpdateCluster',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
self.assertEqual(process_operation_called, [response_pb])
def test_delete(self):
from google.protobuf import empty_pb2
from gcloud.bigtable._testing import _FakeStub
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_ID, client)
cluster = self._makeOne(self.CLUSTER_ID, instance)
# Create request_pb
request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
result = cluster.delete()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'DeleteCluster',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
class Test__prepare_create_request(unittest2.TestCase):
def _callFUT(self, cluster):
from gcloud.bigtable.cluster import _prepare_create_request
return _prepare_create_request(cluster)
def test_it(self):
from gcloud.bigtable.cluster import Cluster
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
CLUSTER_ID = 'cluster-id'
SERVE_NODES = 8
client = _Client(PROJECT)
instance = _Instance(INSTANCE_ID, client)
cluster = Cluster(CLUSTER_ID, instance,
serve_nodes=SERVE_NODES)
request_pb = self._callFUT(cluster)
self.assertEqual(request_pb.cluster_id, CLUSTER_ID)
self.assertEqual(request_pb.parent, instance.name)
self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES)
class Test__parse_pb_any_to_native(unittest2.TestCase):
def _callFUT(self, any_val, expected_type=None):
from gcloud.bigtable.cluster import _parse_pb_any_to_native
return _parse_pb_any_to_native(any_val, expected_type=expected_type)
def test_with_known_type_url(self):
from google.protobuf import any_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable import cluster as MUT
cell = _CellPB(
timestamp_micros=0,
value=b'foobar',
)
type_url = 'type.googleapis.com/' + cell.DESCRIPTOR.full_name
fake_type_url_map = {type_url: cell.__class__}
any_val = any_pb2.Any(
type_url=type_url,
value=cell.SerializeToString(),
)
with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map):
result = self._callFUT(any_val)
self.assertEqual(result, cell)
def test_unknown_type_url(self):
from google.protobuf import any_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable import cluster as MUT
fake_type_url_map = {}
any_val = any_pb2.Any()
with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map):
with self.assertRaises(KeyError):
self._callFUT(any_val)
def test_disagreeing_type_url(self):
from google.protobuf import any_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable import cluster as MUT
type_url1 = 'foo'
type_url2 = 'bar'
fake_type_url_map = {type_url1: None}
any_val = any_pb2.Any(type_url=type_url2)
with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map):
with self.assertRaises(ValueError):
self._callFUT(any_val, expected_type=type_url1)
class Test__process_operation(unittest2.TestCase):
def _callFUT(self, operation_pb):
from gcloud.bigtable.cluster import _process_operation
return _process_operation(operation_pb)
def test_it(self):
from google.longrunning import operations_pb2
PROJECT = 'project'
INSTANCE_ID = 'instance-id'
CLUSTER_ID = 'cluster-id'
EXPECTED_OPERATION_ID = 234
OPERATION_NAME = (
'operations/projects/%s/instances/%s/clusters/%s/operations/%d' %
(PROJECT, INSTANCE_ID, CLUSTER_ID, EXPECTED_OPERATION_ID))
operation_pb = operations_pb2.Operation(name=OPERATION_NAME)
# Execute method with mocks in place.
operation_id = self._callFUT(operation_pb)
# Check outputs.
self.assertEqual(operation_id, EXPECTED_OPERATION_ID)
def test_op_name_parsing_failure(self):
from google.longrunning import operations_pb2
operation_pb = operations_pb2.Operation(name='invalid')
with self.assertRaises(ValueError):
self._callFUT(operation_pb)
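# NOTE: A hedged sketch of the name parsing these tests exercise; the exact
# pattern used by ``gcloud.bigtable.cluster._process_operation`` may differ.
import re

_SKETCH_OPERATION_NAME_RE = re.compile(
    r'^operations/projects/(?P<project>[^/]+)/'
    r'instances/(?P<instance>[^/]+)/'
    r'clusters/(?P<cluster>[^/]+)/'
    r'operations/(?P<operation_id>\d+)$')


def _sketch_process_operation(operation_pb):
    """Pull the trailing numeric operation ID out of a long-running op name."""
    match = _SKETCH_OPERATION_NAME_RE.match(operation_pb.name)
    if match is None:
        raise ValueError('Operation name was not in the expected format: %s'
                         % (operation_pb.name,))
    return int(match.group('operation_id'))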
def _CellPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Cell(*args, **kw)
def _ClusterPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as instance_v2_pb2)
return instance_v2_pb2.Cluster(*args, **kw)
def _DeleteClusterRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
return messages_v2_pb2.DeleteClusterRequest(*args, **kw)
def _GetClusterRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
return messages_v2_pb2.GetClusterRequest(*args, **kw)
class _Instance(object):
def __init__(self, instance_id, client):
self.instance_id = instance_id
self._client = client
@property
def name(self):
return 'projects/%s/instances/%s' % (
self._client.project, self.instance_id)
def copy(self):
return self.__class__(self.instance_id, self._client)
def __eq__(self, other):
return (other.instance_id == self.instance_id and
other._client == self._client)
class _Client(object):
def __init__(self, project, timeout_seconds=None):
self.project = project
self.project_name = 'projects/' + self.project
self.timeout_seconds = timeout_seconds
def __eq__(self, other):
return (other.project == self.project and
other.project_name == self.project_name and
other.timeout_seconds == self.timeout_seconds)

View file

@ -0,0 +1,669 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test__timedelta_to_duration_pb(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.column_family import _timedelta_to_duration_pb
return _timedelta_to_duration_pb(*args, **kwargs)
def test_it(self):
import datetime
from google.protobuf import duration_pb2
seconds = microseconds = 1
timedelta_val = datetime.timedelta(seconds=seconds,
microseconds=microseconds)
result = self._callFUT(timedelta_val)
self.assertTrue(isinstance(result, duration_pb2.Duration))
self.assertEqual(result.seconds, seconds)
self.assertEqual(result.nanos, 1000 * microseconds)
def test_with_negative_microseconds(self):
import datetime
from google.protobuf import duration_pb2
seconds = 1
microseconds = -5
timedelta_val = datetime.timedelta(seconds=seconds,
microseconds=microseconds)
result = self._callFUT(timedelta_val)
self.assertTrue(isinstance(result, duration_pb2.Duration))
self.assertEqual(result.seconds, seconds - 1)
self.assertEqual(result.nanos, 10**9 + 1000 * microseconds)
def test_with_negative_seconds(self):
import datetime
from google.protobuf import duration_pb2
seconds = -1
microseconds = 5
timedelta_val = datetime.timedelta(seconds=seconds,
microseconds=microseconds)
result = self._callFUT(timedelta_val)
self.assertTrue(isinstance(result, duration_pb2.Duration))
self.assertEqual(result.seconds, seconds + 1)
self.assertEqual(result.nanos, -(10**9 - 1000 * microseconds))
class Test__duration_pb_to_timedelta(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.column_family import _duration_pb_to_timedelta
return _duration_pb_to_timedelta(*args, **kwargs)
def test_it(self):
import datetime
from google.protobuf import duration_pb2
seconds = microseconds = 1
duration_pb = duration_pb2.Duration(seconds=seconds,
nanos=1000 * microseconds)
timedelta_val = datetime.timedelta(seconds=seconds,
microseconds=microseconds)
result = self._callFUT(duration_pb)
self.assertTrue(isinstance(result, datetime.timedelta))
self.assertEqual(result, timedelta_val)
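# NOTE: A hedged sketch of conversions consistent with the expectations in the
# tests above; the real helpers in ``gcloud.bigtable.column_family`` may
# differ in details.
def _sketch_timedelta_to_duration_pb(timedelta_val):
    """Convert a ``datetime.timedelta`` to a ``Duration`` protobuf."""
    from google.protobuf import duration_pb2

    seconds_decimal = timedelta_val.total_seconds()
    seconds = int(seconds_decimal)  # truncates toward zero
    if seconds_decimal < 0 and timedelta_val.microseconds:
        # ``timedelta`` normalizes microseconds into [0, 10**6); re-sign
        # them so ``seconds`` and ``nanos`` carry the same sign.
        signed_micros = timedelta_val.microseconds - 10 ** 6
    else:
        signed_micros = timedelta_val.microseconds
    return duration_pb2.Duration(seconds=seconds, nanos=1000 * signed_micros)


def _sketch_duration_pb_to_timedelta(duration_pb):
    """Convert a ``Duration`` protobuf back into a ``datetime.timedelta``."""
    import datetime

    return datetime.timedelta(seconds=duration_pb.seconds,
                              microseconds=duration_pb.nanos / 1000.0)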
class TestMaxVersionsGCRule(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.column_family import MaxVersionsGCRule
return MaxVersionsGCRule
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test___eq__type_differ(self):
gc_rule1 = self._makeOne(10)
gc_rule2 = object()
self.assertNotEqual(gc_rule1, gc_rule2)
def test___eq__same_value(self):
gc_rule1 = self._makeOne(2)
gc_rule2 = self._makeOne(2)
self.assertEqual(gc_rule1, gc_rule2)
def test___ne__same_value(self):
gc_rule1 = self._makeOne(99)
gc_rule2 = self._makeOne(99)
comparison_val = (gc_rule1 != gc_rule2)
self.assertFalse(comparison_val)
def test_to_pb(self):
max_num_versions = 1337
gc_rule = self._makeOne(max_num_versions=max_num_versions)
pb_val = gc_rule.to_pb()
expected = _GcRulePB(max_num_versions=max_num_versions)
self.assertEqual(pb_val, expected)
class TestMaxAgeGCRule(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.column_family import MaxAgeGCRule
return MaxAgeGCRule
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test___eq__type_differ(self):
max_age = object()
gc_rule1 = self._makeOne(max_age=max_age)
gc_rule2 = object()
self.assertNotEqual(gc_rule1, gc_rule2)
def test___eq__same_value(self):
max_age = object()
gc_rule1 = self._makeOne(max_age=max_age)
gc_rule2 = self._makeOne(max_age=max_age)
self.assertEqual(gc_rule1, gc_rule2)
def test___ne__same_value(self):
max_age = object()
gc_rule1 = self._makeOne(max_age=max_age)
gc_rule2 = self._makeOne(max_age=max_age)
comparison_val = (gc_rule1 != gc_rule2)
self.assertFalse(comparison_val)
def test_to_pb(self):
import datetime
from google.protobuf import duration_pb2
max_age = datetime.timedelta(seconds=1)
duration = duration_pb2.Duration(seconds=1)
gc_rule = self._makeOne(max_age=max_age)
pb_val = gc_rule.to_pb()
self.assertEqual(pb_val, _GcRulePB(max_age=duration))
class TestGCRuleUnion(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.column_family import GCRuleUnion
return GCRuleUnion
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
rules = object()
rule_union = self._makeOne(rules)
self.assertTrue(rule_union.rules is rules)
def test___eq__(self):
rules = object()
gc_rule1 = self._makeOne(rules)
gc_rule2 = self._makeOne(rules)
self.assertEqual(gc_rule1, gc_rule2)
def test___eq__type_differ(self):
rules = object()
gc_rule1 = self._makeOne(rules)
gc_rule2 = object()
self.assertNotEqual(gc_rule1, gc_rule2)
def test___ne__same_value(self):
rules = object()
gc_rule1 = self._makeOne(rules)
gc_rule2 = self._makeOne(rules)
comparison_val = (gc_rule1 != gc_rule2)
self.assertFalse(comparison_val)
def test_to_pb(self):
import datetime
from google.protobuf import duration_pb2
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
max_num_versions = 42
rule1 = MaxVersionsGCRule(max_num_versions)
pb_rule1 = _GcRulePB(max_num_versions=max_num_versions)
max_age = datetime.timedelta(seconds=1)
rule2 = MaxAgeGCRule(max_age)
pb_rule2 = _GcRulePB(
max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = _GcRulePB(
union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2]))
gc_rule_pb = rule3.to_pb()
self.assertEqual(gc_rule_pb, pb_rule3)
def test_to_pb_nested(self):
import datetime
from google.protobuf import duration_pb2
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
max_num_versions1 = 42
rule1 = MaxVersionsGCRule(max_num_versions1)
pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1)
max_age = datetime.timedelta(seconds=1)
rule2 = MaxAgeGCRule(max_age)
pb_rule2 = _GcRulePB(
max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = _GcRulePB(
union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2]))
max_num_versions2 = 1337
rule4 = MaxVersionsGCRule(max_num_versions2)
pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2)
rule5 = self._makeOne(rules=[rule3, rule4])
pb_rule5 = _GcRulePB(
union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4]))
gc_rule_pb = rule5.to_pb()
self.assertEqual(gc_rule_pb, pb_rule5)
class TestGCRuleIntersection(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.column_family import GCRuleIntersection
return GCRuleIntersection
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
rules = object()
rule_intersection = self._makeOne(rules)
self.assertTrue(rule_intersection.rules is rules)
def test___eq__(self):
rules = object()
gc_rule1 = self._makeOne(rules)
gc_rule2 = self._makeOne(rules)
self.assertEqual(gc_rule1, gc_rule2)
def test___eq__type_differ(self):
rules = object()
gc_rule1 = self._makeOne(rules)
gc_rule2 = object()
self.assertNotEqual(gc_rule1, gc_rule2)
def test___ne__same_value(self):
rules = object()
gc_rule1 = self._makeOne(rules)
gc_rule2 = self._makeOne(rules)
comparison_val = (gc_rule1 != gc_rule2)
self.assertFalse(comparison_val)
def test_to_pb(self):
import datetime
from google.protobuf import duration_pb2
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
max_num_versions = 42
rule1 = MaxVersionsGCRule(max_num_versions)
pb_rule1 = _GcRulePB(max_num_versions=max_num_versions)
max_age = datetime.timedelta(seconds=1)
rule2 = MaxAgeGCRule(max_age)
pb_rule2 = _GcRulePB(
max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = _GcRulePB(
intersection=_GcRuleIntersectionPB(
rules=[pb_rule1, pb_rule2]))
gc_rule_pb = rule3.to_pb()
self.assertEqual(gc_rule_pb, pb_rule3)
def test_to_pb_nested(self):
import datetime
from google.protobuf import duration_pb2
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
max_num_versions1 = 42
rule1 = MaxVersionsGCRule(max_num_versions1)
pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1)
max_age = datetime.timedelta(seconds=1)
rule2 = MaxAgeGCRule(max_age)
pb_rule2 = _GcRulePB(
max_age=duration_pb2.Duration(seconds=1))
rule3 = self._makeOne(rules=[rule1, rule2])
pb_rule3 = _GcRulePB(
intersection=_GcRuleIntersectionPB(
rules=[pb_rule1, pb_rule2]))
max_num_versions2 = 1337
rule4 = MaxVersionsGCRule(max_num_versions2)
pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2)
rule5 = self._makeOne(rules=[rule3, rule4])
pb_rule5 = _GcRulePB(
intersection=_GcRuleIntersectionPB(
rules=[pb_rule3, pb_rule4]))
gc_rule_pb = rule5.to_pb()
self.assertEqual(gc_rule_pb, pb_rule5)
class TestColumnFamily(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.column_family import ColumnFamily
return ColumnFamily
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
column_family_id = u'column-family-id'
table = object()
gc_rule = object()
column_family = self._makeOne(
column_family_id, table, gc_rule=gc_rule)
self.assertEqual(column_family.column_family_id, column_family_id)
self.assertTrue(column_family._table is table)
self.assertTrue(column_family.gc_rule is gc_rule)
def test_name_property(self):
column_family_id = u'column-family-id'
table_name = 'table_name'
table = _Table(table_name)
column_family = self._makeOne(column_family_id, table)
expected_name = table_name + '/columnFamilies/' + column_family_id
self.assertEqual(column_family.name, expected_name)
def test___eq__(self):
column_family_id = 'column_family_id'
table = object()
gc_rule = object()
column_family1 = self._makeOne(column_family_id, table,
gc_rule=gc_rule)
column_family2 = self._makeOne(column_family_id, table,
gc_rule=gc_rule)
self.assertEqual(column_family1, column_family2)
def test___eq__type_differ(self):
column_family1 = self._makeOne('column_family_id', None)
column_family2 = object()
self.assertNotEqual(column_family1, column_family2)
def test___ne__same_value(self):
column_family_id = 'column_family_id'
table = object()
gc_rule = object()
column_family1 = self._makeOne(column_family_id, table,
gc_rule=gc_rule)
column_family2 = self._makeOne(column_family_id, table,
gc_rule=gc_rule)
comparison_val = (column_family1 != column_family2)
self.assertFalse(comparison_val)
def test___ne__(self):
column_family1 = self._makeOne('column_family_id1', None)
column_family2 = self._makeOne('column_family_id2', None)
self.assertNotEqual(column_family1, column_family2)
def _create_test_helper(self, gc_rule=None):
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
from gcloud.bigtable._testing import _FakeStub
project_id = 'project-id'
zone = 'zone'
cluster_id = 'cluster-id'
table_id = 'table-id'
column_family_id = 'column-family-id'
timeout_seconds = 4
table_name = ('projects/' + project_id + '/zones/' + zone +
'/clusters/' + cluster_id + '/tables/' + table_id)
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
column_family = self._makeOne(
column_family_id, table, gc_rule=gc_rule)
# Create request_pb
if gc_rule is None:
column_family_pb = _ColumnFamilyPB()
else:
column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=table_name)
request_pb.modifications.add(
id=column_family_id,
create=column_family_pb,
)
# Create response_pb
response_pb = _ColumnFamilyPB()
# Patch the stub used by the API method.
client._table_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # create() has no return value.
# Perform the method and check the result.
self.assertEqual(stub.results, (response_pb,))
result = column_family.create()
self.assertEqual(stub.results, ())
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ModifyColumnFamilies',
(request_pb, timeout_seconds),
{},
)])
def test_create(self):
self._create_test_helper(gc_rule=None)
def test_create_with_gc_rule(self):
from gcloud.bigtable.column_family import MaxVersionsGCRule
gc_rule = MaxVersionsGCRule(1337)
self._create_test_helper(gc_rule=gc_rule)
def _update_test_helper(self, gc_rule=None):
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
project_id = 'project-id'
zone = 'zone'
cluster_id = 'cluster-id'
table_id = 'table-id'
column_family_id = 'column-family-id'
timeout_seconds = 28
table_name = ('projects/' + project_id + '/zones/' + zone +
'/clusters/' + cluster_id + '/tables/' + table_id)
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
column_family = self._makeOne(
column_family_id, table, gc_rule=gc_rule)
# Create request_pb
if gc_rule is None:
column_family_pb = _ColumnFamilyPB()
else:
column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb())
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=table_name)
request_pb.modifications.add(
id=column_family_id,
update=column_family_pb,
)
# Create response_pb
response_pb = _ColumnFamilyPB()
# Patch the stub used by the API method.
client._table_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # update() has no return value.
# Perform the method and check the result.
self.assertEqual(stub.results, (response_pb,))
result = column_family.update()
self.assertEqual(stub.results, ())
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ModifyColumnFamilies',
(request_pb, timeout_seconds),
{},
)])
def test_update(self):
self._update_test_helper(gc_rule=None)
def test_update_with_gc_rule(self):
from gcloud.bigtable.column_family import MaxVersionsGCRule
gc_rule = MaxVersionsGCRule(1337)
self._update_test_helper(gc_rule=gc_rule)
def test_delete(self):
from google.protobuf import empty_pb2
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
from gcloud.bigtable._testing import _FakeStub
project_id = 'project-id'
zone = 'zone'
cluster_id = 'cluster-id'
table_id = 'table-id'
column_family_id = 'column-family-id'
timeout_seconds = 7
table_name = ('projects/' + project_id + '/zones/' + zone +
'/clusters/' + cluster_id + '/tables/' + table_id)
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
column_family = self._makeOne(column_family_id, table)
# Create request_pb
request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(
name=table_name)
request_pb.modifications.add(
id=column_family_id,
drop=True)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._table_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
self.assertEqual(stub.results, (response_pb,))
result = column_family.delete()
self.assertEqual(stub.results, ())
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ModifyColumnFamilies',
(request_pb, timeout_seconds),
{},
)])
class Test__gc_rule_from_pb(unittest2.TestCase):
def _callFUT(self, *args, **kwargs):
from gcloud.bigtable.column_family import _gc_rule_from_pb
return _gc_rule_from_pb(*args, **kwargs)
def test_empty(self):
gc_rule_pb = _GcRulePB()
self.assertEqual(self._callFUT(gc_rule_pb), None)
def test_max_num_versions(self):
from gcloud.bigtable.column_family import MaxVersionsGCRule
orig_rule = MaxVersionsGCRule(1)
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, MaxVersionsGCRule))
self.assertEqual(result, orig_rule)
def test_max_age(self):
import datetime
from gcloud.bigtable.column_family import MaxAgeGCRule
orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1))
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, MaxAgeGCRule))
self.assertEqual(result, orig_rule)
def test_union(self):
import datetime
from gcloud.bigtable.column_family import GCRuleUnion
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
rule1 = MaxVersionsGCRule(1)
rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1))
orig_rule = GCRuleUnion([rule1, rule2])
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, GCRuleUnion))
self.assertEqual(result, orig_rule)
def test_intersection(self):
import datetime
from gcloud.bigtable.column_family import GCRuleIntersection
from gcloud.bigtable.column_family import MaxAgeGCRule
from gcloud.bigtable.column_family import MaxVersionsGCRule
rule1 = MaxVersionsGCRule(1)
rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1))
orig_rule = GCRuleIntersection([rule1, rule2])
gc_rule_pb = orig_rule.to_pb()
result = self._callFUT(gc_rule_pb)
self.assertTrue(isinstance(result, GCRuleIntersection))
self.assertEqual(result, orig_rule)
def test_unknown_field_name(self):
class MockProto(object):
names = []
@classmethod
def WhichOneof(cls, name):
cls.names.append(name)
return 'unknown'
self.assertEqual(MockProto.names, [])
self.assertRaises(ValueError, self._callFUT, MockProto)
self.assertEqual(MockProto.names, ['rule'])
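# Module-level factory helpers: each one lazily imports the generated v2
# protobuf module and builds the corresponding message, so the tests above
# can construct GcRule / ColumnFamily protobufs without repeating imports.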
def _GcRulePB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_v2_pb2)
return table_v2_pb2.GcRule(*args, **kw)
def _GcRuleIntersectionPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_v2_pb2)
return table_v2_pb2.GcRule.Intersection(*args, **kw)
def _GcRuleUnionPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_v2_pb2)
return table_v2_pb2.GcRule.Union(*args, **kw)
def _ColumnFamilyPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_v2_pb2)
return table_v2_pb2.ColumnFamily(*args, **kw)
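# Minimal stand-ins for the real client, instance, and table objects; the
# column-family tests above only need a timeout and a parent table name.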
class _Instance(object):
def __init__(self, client=None):
self._client = client
class _Client(object):
def __init__(self, timeout_seconds=None):
self.timeout_seconds = timeout_seconds
class _Table(object):
def __init__(self, name, client=None):
self.name = name
self._instance = _Instance(client)

View file

@ -0,0 +1,866 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import unittest2
class TestOperation(unittest2.TestCase):
OP_TYPE = 'fake-op'
OP_ID = 8915
BEGIN = datetime.datetime(2015, 10, 22, 1, 1)
LOCATION_ID = 'loc-id'
def _getTargetClass(self):
from gcloud.bigtable.instance import Operation
return Operation
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _constructor_test_helper(self, instance=None):
operation = self._makeOne(
self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID,
instance=instance)
self.assertEqual(operation.op_type, self.OP_TYPE)
self.assertEqual(operation.op_id, self.OP_ID)
self.assertEqual(operation.begin, self.BEGIN)
self.assertEqual(operation.location_id, self.LOCATION_ID)
self.assertEqual(operation._instance, instance)
self.assertFalse(operation._complete)
def test_constructor_defaults(self):
self._constructor_test_helper()
def test_constructor_explicit_instance(self):
instance = object()
self._constructor_test_helper(instance=instance)
def test___eq__(self):
instance = object()
operation1 = self._makeOne(
self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID,
instance=instance)
operation2 = self._makeOne(
self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID,
instance=instance)
self.assertEqual(operation1, operation2)
def test___eq__type_differ(self):
operation1 = self._makeOne('foo', 123, None, self.LOCATION_ID)
operation2 = object()
self.assertNotEqual(operation1, operation2)
def test___ne__same_value(self):
instance = object()
operation1 = self._makeOne(
self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID,
instance=instance)
operation2 = self._makeOne(
self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID,
instance=instance)
comparison_val = (operation1 != operation2)
self.assertFalse(comparison_val)
def test___ne__(self):
operation1 = self._makeOne('foo', 123, None, self.LOCATION_ID)
operation2 = self._makeOne('bar', 456, None, self.LOCATION_ID)
self.assertNotEqual(operation1, operation2)
def test_finished_without_operation(self):
operation = self._makeOne(None, None, None, None)
operation._complete = True
with self.assertRaises(ValueError):
operation.finished()
def _finished_helper(self, done):
from google.longrunning import operations_pb2
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable.instance import Instance
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
TIMEOUT_SECONDS = 1
client = _Client(PROJECT, timeout_seconds=TIMEOUT_SECONDS)
instance = Instance(INSTANCE_ID, client, self.LOCATION_ID)
operation = self._makeOne(
self.OP_TYPE, self.OP_ID, self.BEGIN, self.LOCATION_ID,
instance=instance)
# Create request_pb
op_name = ('operations/projects/' + PROJECT +
'/instances/' + INSTANCE_ID +
'/locations/' + self.LOCATION_ID +
'/operations/%d' % (self.OP_ID,))
request_pb = operations_pb2.GetOperationRequest(name=op_name)
# Create response_pb
response_pb = operations_pb2.Operation(done=done)
# Patch the stub used by the API method.
client._operations_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = done
# Perform the method and check the result.
result = operation.finished()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'GetOperation',
(request_pb, TIMEOUT_SECONDS),
{},
)])
if done:
self.assertTrue(operation._complete)
else:
self.assertFalse(operation._complete)
def test_finished(self):
self._finished_helper(done=True)
def test_finished_not_done(self):
self._finished_helper(done=False)
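# The instance tests below follow the same pattern as the operation tests
# above: build the expected request protobuf, replace the client's gRPC stub
# with _FakeStub(response_pb), invoke the method under test, then assert on
# the stub's recorded (method, args, kwargs) calls.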
class TestInstance(unittest2.TestCase):
PROJECT = 'project'
INSTANCE_ID = 'instance-id'
INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID
LOCATION_ID = 'locname'
LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID
DISPLAY_NAME = 'display_name'
OP_ID = 8915
OP_NAME = ('operations/projects/%s/instances/%s/operations/%d' %
(PROJECT, INSTANCE_ID, OP_ID))
TABLE_ID = 'table_id'
TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID
TIMEOUT_SECONDS = 1
def _getTargetClass(self):
from gcloud.bigtable.instance import Instance
return Instance
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor_defaults(self):
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
client = object()
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.INSTANCE_ID)
self.assertTrue(instance._client is client)
self.assertEqual(instance._cluster_location_id, self.LOCATION_ID)
self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES)
def test_constructor_non_default(self):
display_name = 'display_name'
client = object()
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID,
display_name=display_name)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, display_name)
self.assertTrue(instance._client is client)
def test_copy(self):
display_name = 'display_name'
client = _Client(self.PROJECT)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID,
display_name=display_name)
new_instance = instance.copy()
# Make sure the client copy succeeded.
self.assertFalse(new_instance._client is client)
self.assertEqual(new_instance._client, client)
# Make sure the instance itself was copied to a new, equal object.
self.assertFalse(instance is new_instance)
self.assertEqual(instance, new_instance)
def test_table_factory(self):
from gcloud.bigtable.table import Table
instance = self._makeOne(self.INSTANCE_ID, None, self.LOCATION_ID)
table = instance.table(self.TABLE_ID)
self.assertTrue(isinstance(table, Table))
self.assertEqual(table.table_id, self.TABLE_ID)
self.assertEqual(table._instance, instance)
def test__update_from_pb_success(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
display_name = 'display_name'
instance_pb = data_v2_pb2.Instance(
display_name=display_name,
)
instance = self._makeOne(None, None, None, None)
self.assertEqual(instance.display_name, None)
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, display_name)
def test__update_from_pb_no_display_name(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
instance_pb = data_v2_pb2.Instance()
instance = self._makeOne(None, None, None, None)
self.assertEqual(instance.display_name, None)
with self.assertRaises(ValueError):
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, None)
def test_from_pb_success(self):
from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
client = _Client(project=self.PROJECT)
instance_pb = data_v2_pb2.Instance(
name=self.INSTANCE_NAME,
display_name=self.INSTANCE_ID,
)
klass = self._getTargetClass()
instance = klass.from_pb(instance_pb, client)
self.assertTrue(isinstance(instance, klass))
self.assertEqual(instance._client, client)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance._cluster_location_id,
_EXISTING_INSTANCE_LOCATION_ID)
def test_from_pb_bad_instance_name(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
instance_name = 'INCORRECT_FORMAT'
instance_pb = data_v2_pb2.Instance(name=instance_name)
klass = self._getTargetClass()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, None)
def test_from_pb_project_mismatch(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
ALT_PROJECT = 'ALT_PROJECT'
client = _Client(project=ALT_PROJECT)
self.assertNotEqual(self.PROJECT, ALT_PROJECT)
instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME)
klass = self._getTargetClass()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, client)
def test_name_property(self):
client = _Client(project=self.PROJECT)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
self.assertEqual(instance.name, self.INSTANCE_NAME)
def test___eq__(self):
client = object()
instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
self.assertEqual(instance1, instance2)
def test___eq__type_differ(self):
client = object()
instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
instance2 = object()
self.assertNotEqual(instance1, instance2)
def test___ne__same_value(self):
client = object()
instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
comparison_val = (instance1 != instance2)
self.assertFalse(comparison_val)
def test___ne__(self):
instance1 = self._makeOne('instance_id1', 'client1', self.LOCATION_ID)
instance2 = self._makeOne('instance_id2', 'client2', self.LOCATION_ID)
self.assertNotEqual(instance1, instance2)
def test_reload(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb)
from gcloud.bigtable._testing import _FakeStub
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
# Create request_pb
request_pb = messages_v2_pb.GetInstanceRequest(
name=self.INSTANCE_NAME)
# Create response_pb
DISPLAY_NAME = u'hey-hi-hello'
response_pb = data_v2_pb2.Instance(
display_name=DISPLAY_NAME,
)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # reload() has no return value.
# Check Instance optional config values before.
self.assertEqual(instance.display_name, self.INSTANCE_ID)
# Perform the method and check the result.
result = instance.reload()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'GetInstance',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
# Check Instance optional config values after.
self.assertEqual(instance.display_name, DISPLAY_NAME)
def test_create(self):
from google.longrunning import operations_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import instance as MUT
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
# Create request_pb. Just a mock since we monkey patch
# _prepare_create_request
request_pb = object()
# Create response_pb
OP_BEGIN = object()
response_pb = operations_pb2.Operation(name=self.OP_NAME)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN,
self.LOCATION_ID, instance=instance)
# Create the mocks.
prep_create_called = []
def mock_prep_create_req(instance):
prep_create_called.append(instance)
return request_pb
process_operation_called = []
def mock_process_operation(operation_pb):
process_operation_called.append(operation_pb)
return self.OP_ID, self.LOCATION_ID, OP_BEGIN
# Perform the method and check the result.
with _Monkey(MUT,
_prepare_create_request=mock_prep_create_req,
_process_operation=mock_process_operation):
result = instance.create()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'CreateInstance',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
self.assertEqual(prep_create_called, [instance])
self.assertEqual(process_operation_called, [response_pb])
def test_create_w_explicit_serve_nodes(self):
from google.longrunning import operations_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import instance as MUT
SERVE_NODES = 5
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID,
serve_nodes=SERVE_NODES)
# Create request_pb. Just a mock since we monkey patch
# _prepare_create_request
request_pb = object()
# Create response_pb
OP_BEGIN = object()
response_pb = operations_pb2.Operation(name=self.OP_NAME)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = MUT.Operation('create', self.OP_ID, OP_BEGIN,
self.LOCATION_ID, instance=instance)
# Create the mocks.
prep_create_called = []
def mock_prep_create_req(instance):
prep_create_called.append(instance)
return request_pb
process_operation_called = []
def mock_process_operation(operation_pb):
process_operation_called.append(operation_pb)
return self.OP_ID, self.LOCATION_ID, OP_BEGIN
# Perform the method and check the result.
with _Monkey(MUT,
_prepare_create_request=mock_prep_create_req,
_process_operation=mock_process_operation):
result = instance.create()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'CreateInstance',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
self.assertEqual(prep_create_called, [instance])
self.assertEqual(process_operation_called, [response_pb])
def test_update(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._testing import _FakeStub
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID,
display_name=self.DISPLAY_NAME)
# Create request_pb
request_pb = data_v2_pb2.Instance(
name=self.INSTANCE_NAME,
display_name=self.DISPLAY_NAME,
)
# Create response_pb
response_pb = data_v2_pb2.Instance()
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None
# Perform the method and check the result.
result = instance.update()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'UpdateInstance',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
def test_delete(self):
from google.protobuf import empty_pb2
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb)
from gcloud.bigtable._testing import _FakeStub
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
# Create request_pb
request_pb = messages_v2_pb.DeleteInstanceRequest(
name=self.INSTANCE_NAME)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
result = instance.delete()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'DeleteInstance',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
def test_list_clusters(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as instance_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from gcloud.bigtable._testing import _FakeStub
FAILED_LOCATION = 'FAILED'
FAILED_LOCATIONS = [FAILED_LOCATION]
CLUSTER_ID1 = 'cluster-id1'
CLUSTER_ID2 = 'cluster-id2'
SERVE_NODES = 4
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1)
CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2)
# Create request_pb
request_pb = messages_v2_pb2.ListClustersRequest(
parent=instance.name,
)
# Create response_pb
response_pb = messages_v2_pb2.ListClustersResponse(
failed_locations=[FAILED_LOCATION],
clusters=[
instance_v2_pb2.Cluster(
name=CLUSTER_NAME1,
serve_nodes=SERVE_NODES,
),
instance_v2_pb2.Cluster(
name=CLUSTER_NAME2,
serve_nodes=SERVE_NODES,
),
],
)
# Patch the stub used by the API method.
client._instance_stub = stub = _FakeStub(response_pb)
# Create expected_result.
clusters = [
instance.cluster(CLUSTER_ID1),
instance.cluster(CLUSTER_ID2),
]
expected_result = (clusters, FAILED_LOCATIONS)
# Perform the method and check the result.
result = instance.list_clusters()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ListClusters',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
def _list_tables_helper(self, table_name=None):
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_messages_v1_pb2)
from gcloud.bigtable._testing import _FakeStub
client = _Client(self.PROJECT, timeout_seconds=self.TIMEOUT_SECONDS)
instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID)
# Create request_pb
request_pb = table_messages_v1_pb2.ListTablesRequest(
parent=self.INSTANCE_NAME)
# Create response_pb
if table_name is None:
table_name = self.TABLE_NAME
response_pb = table_messages_v1_pb2.ListTablesResponse(
tables=[
table_data_v2_pb2.Table(name=table_name),
],
)
# Patch the stub used by the API method.
client._table_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_table = instance.table(self.TABLE_ID)
expected_result = [expected_table]
# Perform the method and check the result.
result = instance.list_tables()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ListTables',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
def test_list_tables(self):
self._list_tables_helper()
def test_list_tables_failure_bad_split(self):
with self.assertRaises(ValueError):
self._list_tables_helper(table_name='wrong-format')
def test_list_tables_failure_name_bad_before(self):
BAD_TABLE_NAME = ('nonempty-section-before' +
'projects/' + self.PROJECT +
'/instances/' + self.INSTANCE_ID +
'/tables/' + self.TABLE_ID)
with self.assertRaises(ValueError):
self._list_tables_helper(table_name=BAD_TABLE_NAME)
class Test__prepare_create_request(unittest2.TestCase):
PROJECT = 'PROJECT'
PARENT = 'projects/' + PROJECT
LOCATION_ID = 'locname'
LOCATION_NAME = 'projects/' + PROJECT + '/locations/' + LOCATION_ID
INSTANCE_ID = 'instance-id'
INSTANCE_NAME = PARENT + '/instances/' + INSTANCE_ID
CLUSTER_NAME = INSTANCE_NAME + '/clusters/' + INSTANCE_ID
def _callFUT(self, instance, **kw):
from gcloud.bigtable.instance import _prepare_create_request
return _prepare_create_request(instance, **kw)
def test_w_defaults(self):
from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb)
from gcloud.bigtable.instance import Instance
client = _Client(self.PROJECT)
instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID)
request_pb = self._callFUT(instance)
self.assertTrue(isinstance(request_pb,
messages_v2_pb.CreateInstanceRequest))
self.assertEqual(request_pb.instance_id, self.INSTANCE_ID)
self.assertEqual(request_pb.parent, self.PARENT)
self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance))
self.assertEqual(request_pb.instance.name, u'')
self.assertEqual(request_pb.instance.display_name, self.INSTANCE_ID)
# An instance must also define a same-named cluster
cluster = request_pb.clusters[self.INSTANCE_ID]
self.assertTrue(isinstance(cluster, data_v2_pb2.Cluster))
self.assertEqual(cluster.name, self.CLUSTER_NAME)
self.assertEqual(cluster.location, self.LOCATION_NAME)
self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
def test_w_explicit_serve_nodes(self):
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb)
from gcloud.bigtable.instance import Instance
DISPLAY_NAME = u'DISPLAY_NAME'
SERVE_NODES = 5
client = _Client(self.PROJECT)
instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID,
display_name=DISPLAY_NAME,
serve_nodes=SERVE_NODES)
request_pb = self._callFUT(instance)
self.assertTrue(isinstance(request_pb,
messages_v2_pb.CreateInstanceRequest))
self.assertEqual(request_pb.instance_id, self.INSTANCE_ID)
self.assertEqual(request_pb.parent,
'projects/' + self.PROJECT)
self.assertTrue(isinstance(request_pb.instance, data_v2_pb2.Instance))
self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME)
# An instance must also define a same-named cluster
cluster = request_pb.clusters[self.INSTANCE_ID]
self.assertTrue(isinstance(cluster, data_v2_pb2.Cluster))
self.assertEqual(cluster.location, self.LOCATION_NAME)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
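# _prepare_create_request, as exercised above, packs the instance's display
# name and a same-named cluster (location plus serve_nodes) into a single
# CreateInstanceRequest rooted at the project parent.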
class Test__parse_pb_any_to_native(unittest2.TestCase):
def _callFUT(self, any_val, expected_type=None):
from gcloud.bigtable.instance import _parse_pb_any_to_native
return _parse_pb_any_to_native(any_val, expected_type=expected_type)
def test_with_known_type_url(self):
from google.protobuf import any_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
from gcloud.bigtable import instance as MUT
TYPE_URL = 'type.googleapis.com/' + data_v2_pb2._CELL.full_name
fake_type_url_map = {TYPE_URL: data_v2_pb2.Cell}
cell = data_v2_pb2.Cell(
timestamp_micros=0,
value=b'foobar',
)
any_val = any_pb2.Any(
type_url=TYPE_URL,
value=cell.SerializeToString(),
)
with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map):
result = self._callFUT(any_val)
self.assertEqual(result, cell)
def test_with_create_instance_metadata(self):
from google.protobuf import any_pb2
from google.protobuf.timestamp_pb2 import Timestamp
from gcloud.bigtable._generated_v2 import (
instance_pb2 as data_v2_pb2)
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb)
TYPE_URL = ('type.googleapis.com/' +
messages_v2_pb._CREATEINSTANCEMETADATA.full_name)
metadata = messages_v2_pb.CreateInstanceMetadata(
request_time=Timestamp(seconds=1, nanos=1234),
finish_time=Timestamp(seconds=10, nanos=891011),
original_request=messages_v2_pb.CreateInstanceRequest(
parent='foo',
instance_id='bar',
instance=data_v2_pb2.Instance(
display_name='quux',
),
),
)
any_val = any_pb2.Any(
type_url=TYPE_URL,
value=metadata.SerializeToString(),
)
result = self._callFUT(any_val)
self.assertEqual(result, metadata)
def test_unknown_type_url(self):
from google.protobuf import any_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable import instance as MUT
fake_type_url_map = {}
any_val = any_pb2.Any()
with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map):
with self.assertRaises(KeyError):
self._callFUT(any_val)
def test_disagreeing_type_url(self):
from google.protobuf import any_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable import instance as MUT
TYPE_URL1 = 'foo'
TYPE_URL2 = 'bar'
fake_type_url_map = {TYPE_URL1: None}
any_val = any_pb2.Any(type_url=TYPE_URL2)
with _Monkey(MUT, _TYPE_URL_MAP=fake_type_url_map):
with self.assertRaises(ValueError):
self._callFUT(any_val, expected_type=TYPE_URL1)
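# _parse_pb_any_to_native, as the tests above show, looks up the Any value's
# type_url in a module-level map and deserializes the payload into the mapped
# protobuf class, raising on unknown or mismatched type URLs.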
class Test__process_operation(unittest2.TestCase):
def _callFUT(self, operation_pb):
from gcloud.bigtable.instance import _process_operation
return _process_operation(operation_pb)
def test_it(self):
from google.longrunning import operations_pb2
from gcloud._testing import _Monkey
from gcloud.bigtable._generated_v2 import (
bigtable_instance_admin_pb2 as messages_v2_pb)
from gcloud.bigtable import instance as MUT
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
LOCATION_ID = 'location'
OP_ID = 234
OPERATION_NAME = (
'operations/projects/%s/instances/%s/locations/%s/operations/%d' %
(PROJECT, INSTANCE_ID, LOCATION_ID, OP_ID))
current_op = operations_pb2.Operation(name=OPERATION_NAME)
# Create mocks.
request_metadata = messages_v2_pb.CreateInstanceMetadata()
parse_pb_any_called = []
def mock_parse_pb_any_to_native(any_val, expected_type=None):
parse_pb_any_called.append((any_val, expected_type))
return request_metadata
expected_operation_begin = object()
ts_to_dt_called = []
def mock_pb_timestamp_to_datetime(timestamp):
ts_to_dt_called.append(timestamp)
return expected_operation_begin
# Execute the method with mocks in place.
with _Monkey(MUT, _parse_pb_any_to_native=mock_parse_pb_any_to_native,
_pb_timestamp_to_datetime=mock_pb_timestamp_to_datetime):
op_id, loc_id, op_begin = self._callFUT(current_op)
# Check outputs.
self.assertEqual(op_id, OP_ID)
self.assertTrue(op_begin is expected_operation_begin)
self.assertEqual(loc_id, LOCATION_ID)
# Check mocks were used correctly.
self.assertEqual(parse_pb_any_called, [(current_op.metadata, None)])
self.assertEqual(ts_to_dt_called, [request_metadata.request_time])
def test_op_name_parsing_failure(self):
from google.longrunning import operations_pb2
operation_pb = operations_pb2.Operation(name='invalid')
with self.assertRaises(ValueError):
self._callFUT(operation_pb)
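# Fake client used throughout the instance tests: it only carries the project
# name and timeout, plus copy/equality support for the copy() test above.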
class _Client(object):
def __init__(self, project, timeout_seconds=None):
self.project = project
self.project_name = 'projects/' + self.project
self.timeout_seconds = timeout_seconds
def copy(self):
from copy import deepcopy
return deepcopy(self)
def __eq__(self, other):
return (other.project == self.project and
other.project_name == self.project_name and
other.timeout_seconds == self.timeout_seconds)

View file

@ -0,0 +1,915 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class Test_SetDeleteRow(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import _SetDeleteRow
return _SetDeleteRow
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test__get_mutations_virtual(self):
row = self._makeOne(b'row-key', None)
with self.assertRaises(NotImplementedError):
row._get_mutations(None)
class TestDirectRow(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import DirectRow
return DirectRow
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
row_key = b'row_key'
table = object()
row = self._makeOne(row_key, table)
self.assertEqual(row._row_key, row_key)
self.assertTrue(row._table is table)
self.assertEqual(row._pb_mutations, [])
def test_constructor_with_unicode(self):
row_key = u'row_key'
row_key_bytes = b'row_key'
table = object()
row = self._makeOne(row_key, table)
self.assertEqual(row._row_key, row_key_bytes)
self.assertTrue(row._table is table)
def test_constructor_with_non_bytes(self):
row_key = object()
with self.assertRaises(TypeError):
self._makeOne(row_key, None)
def test__get_mutations(self):
row_key = b'row_key'
row = self._makeOne(row_key, None)
row._pb_mutations = mutations = object()
self.assertTrue(mutations is row._get_mutations(None))
def _set_cell_helper(self, column=None, column_bytes=None,
value=b'foobar', timestamp=None,
timestamp_micros=-1):
import six
import struct
row_key = b'row_key'
column_family_id = u'column_family_id'
if column is None:
column = b'column'
table = object()
row = self._makeOne(row_key, table)
self.assertEqual(row._pb_mutations, [])
row.set_cell(column_family_id, column,
value, timestamp=timestamp)
if isinstance(value, six.integer_types):
value = struct.pack('>q', value)
expected_pb = _MutationPB(
set_cell=_MutationSetCellPB(
family_name=column_family_id,
column_qualifier=column_bytes or column,
timestamp_micros=timestamp_micros,
value=value,
),
)
self.assertEqual(row._pb_mutations, [expected_pb])
def test_set_cell(self):
self._set_cell_helper()
def test_set_cell_with_string_column(self):
column_bytes = b'column'
column_non_bytes = u'column'
self._set_cell_helper(column=column_non_bytes,
column_bytes=column_bytes)
def test_set_cell_with_integer_value(self):
value = 1337
self._set_cell_helper(value=value)
def test_set_cell_with_non_bytes_value(self):
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
value = object() # Not bytes
with self.assertRaises(TypeError):
row.set_cell(column_family_id, column, value)
def test_set_cell_with_non_null_timestamp(self):
import datetime
from gcloud._helpers import _EPOCH
microseconds = 898294371
millis_granularity = microseconds - (microseconds % 1000)
timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds)
self._set_cell_helper(timestamp=timestamp,
timestamp_micros=millis_granularity)
def test_delete(self):
row_key = b'row_key'
row = self._makeOne(row_key, object())
self.assertEqual(row._pb_mutations, [])
row.delete()
expected_pb = _MutationPB(
delete_from_row=_MutationDeleteFromRowPB(),
)
self.assertEqual(row._pb_mutations, [expected_pb])
def test_delete_cell(self):
klass = self._getTargetClass()
class MockRow(klass):
def __init__(self, *args, **kwargs):
super(MockRow, self).__init__(*args, **kwargs)
self._args = []
self._kwargs = []
# Replace the called method with one that logs arguments.
def _delete_cells(self, *args, **kwargs):
self._args.append(args)
self._kwargs.append(kwargs)
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
mock_row = MockRow(row_key, table)
# Make sure no values are set before calling the method.
self.assertEqual(mock_row._pb_mutations, [])
self.assertEqual(mock_row._args, [])
self.assertEqual(mock_row._kwargs, [])
# Actually make the request against the mock class.
time_range = object()
mock_row.delete_cell(column_family_id, column, time_range=time_range)
self.assertEqual(mock_row._pb_mutations, [])
self.assertEqual(mock_row._args, [(column_family_id, [column])])
self.assertEqual(mock_row._kwargs, [{
'state': None,
'time_range': time_range,
}])
def test_delete_cells_non_iterable(self):
row_key = b'row_key'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = object() # Not iterable
with self.assertRaises(TypeError):
row.delete_cells(column_family_id, columns)
def test_delete_cells_all_columns(self):
row_key = b'row_key'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
klass = self._getTargetClass()
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, klass.ALL_COLUMNS)
expected_pb = _MutationPB(
delete_from_family=_MutationDeleteFromFamilyPB(
family_name=column_family_id,
),
)
self.assertEqual(row._pb_mutations, [expected_pb])
def test_delete_cells_no_columns(self):
row_key = b'row_key'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = []
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, columns)
self.assertEqual(row._pb_mutations, [])
def _delete_cells_helper(self, time_range=None):
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = [column]
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, columns, time_range=time_range)
expected_pb = _MutationPB(
delete_from_column=_MutationDeleteFromColumnPB(
family_name=column_family_id,
column_qualifier=column,
),
)
if time_range is not None:
expected_pb.delete_from_column.time_range.CopyFrom(
time_range.to_pb())
self.assertEqual(row._pb_mutations, [expected_pb])
def test_delete_cells_no_time_range(self):
self._delete_cells_helper()
def test_delete_cells_with_time_range(self):
import datetime
from gcloud._helpers import _EPOCH
from gcloud.bigtable.row_filters import TimestampRange
microseconds = 30871000 # Value is already at millisecond granularity.
start = _EPOCH + datetime.timedelta(microseconds=microseconds)
time_range = TimestampRange(start=start)
self._delete_cells_helper(time_range=time_range)
def test_delete_cells_with_bad_column(self):
# This makes sure a failure on one of the columns doesn't leave
# the row's mutations in a bad state.
row_key = b'row_key'
column = b'column'
column_family_id = u'column_family_id'
table = object()
row = self._makeOne(row_key, table)
columns = [column, object()]
self.assertEqual(row._pb_mutations, [])
with self.assertRaises(TypeError):
row.delete_cells(column_family_id, columns)
self.assertEqual(row._pb_mutations, [])
def test_delete_cells_with_string_columns(self):
row_key = b'row_key'
column_family_id = u'column_family_id'
column1 = u'column1'
column1_bytes = b'column1'
column2 = u'column2'
column2_bytes = b'column2'
table = object()
row = self._makeOne(row_key, table)
columns = [column1, column2]
self.assertEqual(row._pb_mutations, [])
row.delete_cells(column_family_id, columns)
expected_pb1 = _MutationPB(
delete_from_column=_MutationDeleteFromColumnPB(
family_name=column_family_id,
column_qualifier=column1_bytes,
),
)
expected_pb2 = _MutationPB(
delete_from_column=_MutationDeleteFromColumnPB(
family_name=column_family_id,
column_qualifier=column2_bytes,
),
)
self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2])
def test_commit(self):
from google.protobuf import empty_pb2
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
table_name = 'projects/more-stuff'
column_family_id = u'column_family_id'
column = b'column'
timeout_seconds = 711
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
row = self._makeOne(row_key, table)
# Create request_pb
value = b'bytes-value'
mutation = _MutationPB(
set_cell=_MutationSetCellPB(
family_name=column_family_id,
column_qualifier=column,
timestamp_micros=-1, # Default value.
value=value,
),
)
request_pb = _MutateRowRequestPB(
table_name=table_name,
row_key=row_key,
mutations=[mutation],
)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # commit() has no return value when no filter.
# Perform the method and check the result.
row.set_cell(column_family_id, column, value)
result = row.commit()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'MutateRow',
(request_pb, timeout_seconds),
{},
)])
self.assertEqual(row._pb_mutations, [])
def test_commit_too_many_mutations(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import row as MUT
row_key = b'row_key'
table = object()
row = self._makeOne(row_key, table)
row._pb_mutations = [1, 2, 3]
num_mutations = len(row._pb_mutations)
with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
with self.assertRaises(ValueError):
row.commit()
def test_commit_no_mutations(self):
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
client = _Client()
table = _Table(None, client=client)
row = self._makeOne(row_key, table)
self.assertEqual(row._pb_mutations, [])
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub()
# Perform the method and check the result.
result = row.commit()
self.assertEqual(result, None)
# Make sure no request was sent.
self.assertEqual(stub.method_calls, [])
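# DirectRow, as exercised above, accumulates Mutation protobufs in
# _pb_mutations via set_cell/delete/delete_cell(s); commit() sends them in a
# single MutateRow request and clears the list (or is a no-op when empty).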
class TestConditionalRow(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import ConditionalRow
return ConditionalRow
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
row_key = b'row_key'
table = object()
filter_ = object()
row = self._makeOne(row_key, table, filter_=filter_)
self.assertEqual(row._row_key, row_key)
self.assertTrue(row._table is table)
self.assertTrue(row._filter is filter_)
self.assertEqual(row._true_pb_mutations, [])
self.assertEqual(row._false_pb_mutations, [])
def test__get_mutations(self):
row_key = b'row_key'
filter_ = object()
row = self._makeOne(row_key, None, filter_=filter_)
row._true_pb_mutations = true_mutations = object()
row._false_pb_mutations = false_mutations = object()
self.assertTrue(true_mutations is row._get_mutations(True))
self.assertTrue(false_mutations is row._get_mutations(False))
self.assertTrue(false_mutations is row._get_mutations(None))
def test_commit(self):
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable.row_filters import RowSampleFilter
row_key = b'row_key'
table_name = 'projects/more-stuff'
column_family_id1 = u'column_family_id1'
column_family_id2 = u'column_family_id2'
column_family_id3 = u'column_family_id3'
column1 = b'column1'
column2 = b'column2'
timeout_seconds = 262
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
row_filter = RowSampleFilter(0.33)
row = self._makeOne(row_key, table, filter_=row_filter)
# Create request_pb
value1 = b'bytes-value'
mutation1 = _MutationPB(
set_cell=_MutationSetCellPB(
family_name=column_family_id1,
column_qualifier=column1,
timestamp_micros=-1, # Default value.
value=value1,
),
)
mutation2 = _MutationPB(
delete_from_row=_MutationDeleteFromRowPB(),
)
mutation3 = _MutationPB(
delete_from_column=_MutationDeleteFromColumnPB(
family_name=column_family_id2,
column_qualifier=column2,
),
)
mutation4 = _MutationPB(
delete_from_family=_MutationDeleteFromFamilyPB(
family_name=column_family_id3,
),
)
request_pb = _CheckAndMutateRowRequestPB(
table_name=table_name,
row_key=row_key,
predicate_filter=row_filter.to_pb(),
true_mutations=[mutation1, mutation3, mutation4],
false_mutations=[mutation2],
)
# Create response_pb
predicate_matched = True
response_pb = _CheckAndMutateRowResponsePB(
predicate_matched=predicate_matched)
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = predicate_matched
# Perform the method and check the result.
row.set_cell(column_family_id1, column1, value1, state=True)
row.delete(state=False)
row.delete_cell(column_family_id2, column2, state=True)
row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True)
result = row.commit()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'CheckAndMutateRow',
(request_pb, timeout_seconds),
{},
)])
self.assertEqual(row._true_pb_mutations, [])
self.assertEqual(row._false_pb_mutations, [])
def test_commit_too_many_mutations(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import row as MUT
row_key = b'row_key'
table = object()
filter_ = object()
row = self._makeOne(row_key, table, filter_=filter_)
row._true_pb_mutations = [1, 2, 3]
num_mutations = len(row._true_pb_mutations)
with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
with self.assertRaises(ValueError):
row.commit()
def test_commit_no_mutations(self):
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
client = _Client()
table = _Table(None, client=client)
filter_ = object()
row = self._makeOne(row_key, table, filter_=filter_)
self.assertEqual(row._true_pb_mutations, [])
self.assertEqual(row._false_pb_mutations, [])
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub()
# Perform the method and check the result.
result = row.commit()
self.assertEqual(result, None)
# Make sure no request was sent.
self.assertEqual(stub.method_calls, [])
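# ConditionalRow keeps two mutation lists selected by the `state` keyword:
# one applied when the row filter matches, one when it does not. commit()
# sends a CheckAndMutateRow request and returns predicate_matched.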
class TestAppendRow(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row import AppendRow
return AppendRow
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
row_key = b'row_key'
table = object()
row = self._makeOne(row_key, table)
self.assertEqual(row._row_key, row_key)
self.assertTrue(row._table is table)
self.assertEqual(row._rule_pb_list, [])
def test_clear(self):
row_key = b'row_key'
table = object()
row = self._makeOne(row_key, table)
row._rule_pb_list = [1, 2, 3]
row.clear()
self.assertEqual(row._rule_pb_list, [])
def test_append_cell_value(self):
table = object()
row_key = b'row_key'
row = self._makeOne(row_key, table)
self.assertEqual(row._rule_pb_list, [])
column = b'column'
column_family_id = u'column_family_id'
value = b'bytes-val'
row.append_cell_value(column_family_id, column, value)
expected_pb = _ReadModifyWriteRulePB(
family_name=column_family_id, column_qualifier=column,
append_value=value)
self.assertEqual(row._rule_pb_list, [expected_pb])
def test_increment_cell_value(self):
table = object()
row_key = b'row_key'
row = self._makeOne(row_key, table)
self.assertEqual(row._rule_pb_list, [])
column = b'column'
column_family_id = u'column_family_id'
int_value = 281330
row.increment_cell_value(column_family_id, column, int_value)
expected_pb = _ReadModifyWriteRulePB(
family_name=column_family_id, column_qualifier=column,
increment_amount=int_value)
self.assertEqual(row._rule_pb_list, [expected_pb])
def test_commit(self):
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import row as MUT
row_key = b'row_key'
table_name = 'projects/more-stuff'
column_family_id = u'column_family_id'
column = b'column'
timeout_seconds = 87
client = _Client(timeout_seconds=timeout_seconds)
table = _Table(table_name, client=client)
row = self._makeOne(row_key, table)
# Create request_pb
value = b'bytes-value'
# We will call row.append_cell_value(column_family_id, column, value).
request_pb = _ReadModifyWriteRowRequestPB(
table_name=table_name,
row_key=row_key,
rules=[
_ReadModifyWriteRulePB(
family_name=column_family_id,
column_qualifier=column,
append_value=value,
),
],
)
# Create response_pb
response_pb = object()
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_pb)
# Create expected_result.
row_responses = []
expected_result = object()
def mock_parse_rmw_row_response(row_response):
row_responses.append(row_response)
return expected_result
# Perform the method and check the result.
with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response):
row.append_cell_value(column_family_id, column, value)
result = row.commit()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ReadModifyWriteRow',
(request_pb, timeout_seconds),
{},
)])
self.assertEqual(row_responses, [response_pb])
self.assertEqual(row._rule_pb_list, [])
def test_commit_no_rules(self):
from gcloud.bigtable._testing import _FakeStub
row_key = b'row_key'
client = _Client()
table = _Table(None, client=client)
row = self._makeOne(row_key, table)
self.assertEqual(row._rule_pb_list, [])
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub()
# Perform the method and check the result.
result = row.commit()
self.assertEqual(result, {})
# Make sure no request was sent.
self.assertEqual(stub.method_calls, [])
def test_commit_too_many_mutations(self):
from gcloud._testing import _Monkey
from gcloud.bigtable import row as MUT
row_key = b'row_key'
table = object()
row = self._makeOne(row_key, table)
row._rule_pb_list = [1, 2, 3]
num_mutations = len(row._rule_pb_list)
with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1):
with self.assertRaises(ValueError):
row.commit()
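# AppendRow queues ReadModifyWriteRule protobufs (append_cell_value /
# increment_cell_value); commit() sends one ReadModifyWriteRow request and
# returns the parsed {family: {qualifier: [(value, timestamp), ...]}} map.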
class Test__parse_rmw_row_response(unittest2.TestCase):
def _callFUT(self, row_response):
from gcloud.bigtable.row import _parse_rmw_row_response
return _parse_rmw_row_response(row_response)
def test_it(self):
from gcloud._helpers import _datetime_from_microseconds
col_fam1 = u'col-fam-id'
col_fam2 = u'col-fam-id2'
col_name1 = b'col-name1'
col_name2 = b'col-name2'
col_name3 = b'col-name3-but-other-fam'
cell_val1 = b'cell-val'
cell_val2 = b'cell-val-newer'
cell_val3 = b'altcol-cell-val'
cell_val4 = b'foo'
microseconds = 1000871
timestamp = _datetime_from_microseconds(microseconds)
expected_output = {
col_fam1: {
col_name1: [
(cell_val1, timestamp),
(cell_val2, timestamp),
],
col_name2: [
(cell_val3, timestamp),
],
},
col_fam2: {
col_name3: [
(cell_val4, timestamp),
],
},
}
response_row = _RowPB(
families=[
_FamilyPB(
name=col_fam1,
columns=[
_ColumnPB(
qualifier=col_name1,
cells=[
_CellPB(
value=cell_val1,
timestamp_micros=microseconds,
),
_CellPB(
value=cell_val2,
timestamp_micros=microseconds,
),
],
),
_ColumnPB(
qualifier=col_name2,
cells=[
_CellPB(
value=cell_val3,
timestamp_micros=microseconds,
),
],
),
],
),
_FamilyPB(
name=col_fam2,
columns=[
_ColumnPB(
qualifier=col_name3,
cells=[
_CellPB(
value=cell_val4,
timestamp_micros=microseconds,
),
],
),
],
),
],
)
sample_input = _ReadModifyWriteRowResponsePB(row=response_row)
self.assertEqual(expected_output, self._callFUT(sample_input))
class Test__parse_family_pb(unittest2.TestCase):
def _callFUT(self, family_pb):
from gcloud.bigtable.row import _parse_family_pb
return _parse_family_pb(family_pb)
def test_it(self):
from gcloud._helpers import _datetime_from_microseconds
col_fam1 = u'col-fam-id'
col_name1 = b'col-name1'
col_name2 = b'col-name2'
cell_val1 = b'cell-val'
cell_val2 = b'cell-val-newer'
cell_val3 = b'altcol-cell-val'
microseconds = 5554441037
timestamp = _datetime_from_microseconds(microseconds)
expected_dict = {
col_name1: [
(cell_val1, timestamp),
(cell_val2, timestamp),
],
col_name2: [
(cell_val3, timestamp),
],
}
expected_output = (col_fam1, expected_dict)
sample_input = _FamilyPB(
name=col_fam1,
columns=[
_ColumnPB(
qualifier=col_name1,
cells=[
_CellPB(
value=cell_val1,
timestamp_micros=microseconds,
),
_CellPB(
value=cell_val2,
timestamp_micros=microseconds,
),
],
),
_ColumnPB(
qualifier=col_name2,
cells=[
_CellPB(
value=cell_val3,
timestamp_micros=microseconds,
),
],
),
],
)
self.assertEqual(expected_output, self._callFUT(sample_input))
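# Factory helpers: thin wrappers that lazily import the generated v2 protobuf
# modules and construct the request/response and data messages used by the
# row tests above.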
def _CheckAndMutateRowRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.CheckAndMutateRowRequest(*args, **kw)
def _CheckAndMutateRowResponsePB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw)
def _MutateRowRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.MutateRowRequest(*args, **kw)
def _ReadModifyWriteRowRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw)
def _ReadModifyWriteRowResponsePB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw)
def _CellPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Cell(*args, **kw)
def _ColumnPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Column(*args, **kw)
def _FamilyPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Family(*args, **kw)
def _MutationPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Mutation(*args, **kw)
def _MutationSetCellPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Mutation.SetCell(*args, **kw)
def _MutationDeleteFromColumnPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw)
def _MutationDeleteFromFamilyPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw)
def _MutationDeleteFromRowPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw)
def _RowPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.Row(*args, **kw)
def _ReadModifyWriteRulePB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
return data_v2_pb2.ReadModifyWriteRule(*args, **kw)
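# Minimal fakes mirroring the client/instance/table hierarchy; the row tests
# only need a data stub slot, a timeout, and a table name.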
class _Client(object):
data_stub = None
def __init__(self, timeout_seconds=None):
self.timeout_seconds = timeout_seconds
class _Instance(object):
def __init__(self, client=None):
self._client = client
class _Table(object):
def __init__(self, name, client=None):
self.name = name
self._instance = _Instance(client)

View file

@ -0,0 +1,727 @@
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestCell(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row_data import Cell
return Cell
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _from_pb_test_helper(self, labels=None):
import datetime
from gcloud._helpers import _EPOCH
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
timestamp_micros = 18738724000 # Ensure the value is at millisecond granularity.
timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros)
value = b'value-bytes'
if labels is None:
cell_pb = data_v2_pb2.Cell(
value=value, timestamp_micros=timestamp_micros)
cell_expected = self._makeOne(value, timestamp)
else:
cell_pb = data_v2_pb2.Cell(
value=value, timestamp_micros=timestamp_micros, labels=labels)
cell_expected = self._makeOne(value, timestamp, labels=labels)
klass = self._getTargetClass()
result = klass.from_pb(cell_pb)
self.assertEqual(result, cell_expected)
def test_from_pb(self):
self._from_pb_test_helper()
def test_from_pb_with_labels(self):
labels = [u'label1', u'label2']
self._from_pb_test_helper(labels)
def test_constructor(self):
value = object()
timestamp = object()
cell = self._makeOne(value, timestamp)
self.assertEqual(cell.value, value)
self.assertEqual(cell.timestamp, timestamp)
def test___eq__(self):
value = object()
timestamp = object()
cell1 = self._makeOne(value, timestamp)
cell2 = self._makeOne(value, timestamp)
self.assertEqual(cell1, cell2)
def test___eq__type_differ(self):
cell1 = self._makeOne(None, None)
cell2 = object()
self.assertNotEqual(cell1, cell2)
def test___ne__same_value(self):
value = object()
timestamp = object()
cell1 = self._makeOne(value, timestamp)
cell2 = self._makeOne(value, timestamp)
comparison_val = (cell1 != cell2)
self.assertFalse(comparison_val)
def test___ne__(self):
value1 = 'value1'
value2 = 'value2'
timestamp = object()
cell1 = self._makeOne(value1, timestamp)
cell2 = self._makeOne(value2, timestamp)
self.assertNotEqual(cell1, cell2)
class TestPartialRowData(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row_data import PartialRowData
return PartialRowData
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
row_key = object()
partial_row_data = self._makeOne(row_key)
self.assertTrue(partial_row_data._row_key is row_key)
self.assertEqual(partial_row_data._cells, {})
def test___eq__(self):
row_key = object()
partial_row_data1 = self._makeOne(row_key)
partial_row_data2 = self._makeOne(row_key)
self.assertEqual(partial_row_data1, partial_row_data2)
def test___eq__type_differ(self):
partial_row_data1 = self._makeOne(None)
partial_row_data2 = object()
self.assertNotEqual(partial_row_data1, partial_row_data2)
def test___ne__same_value(self):
row_key = object()
partial_row_data1 = self._makeOne(row_key)
partial_row_data2 = self._makeOne(row_key)
comparison_val = (partial_row_data1 != partial_row_data2)
self.assertFalse(comparison_val)
def test___ne__(self):
row_key1 = object()
partial_row_data1 = self._makeOne(row_key1)
row_key2 = object()
partial_row_data2 = self._makeOne(row_key2)
self.assertNotEqual(partial_row_data1, partial_row_data2)
def test___ne__cells(self):
row_key = object()
partial_row_data1 = self._makeOne(row_key)
partial_row_data1._cells = object()
partial_row_data2 = self._makeOne(row_key)
self.assertNotEqual(partial_row_data1, partial_row_data2)
def test_to_dict(self):
cell1 = object()
cell2 = object()
cell3 = object()
family_name1 = u'name1'
family_name2 = u'name2'
qual1 = b'col1'
qual2 = b'col2'
qual3 = b'col3'
partial_row_data = self._makeOne(None)
partial_row_data._cells = {
family_name1: {
qual1: cell1,
qual2: cell2,
},
family_name2: {
qual3: cell3,
},
}
result = partial_row_data.to_dict()
expected_result = {
b'name1:col1': cell1,
b'name1:col2': cell2,
b'name2:col3': cell3,
}
self.assertEqual(result, expected_result)
def test_cells_property(self):
partial_row_data = self._makeOne(None)
cells = {1: 2}
partial_row_data._cells = cells
# Make sure we get a copy, not the original.
self.assertFalse(partial_row_data.cells is cells)
self.assertEqual(partial_row_data.cells, cells)
def test_row_key_getter(self):
row_key = object()
partial_row_data = self._makeOne(row_key)
self.assertTrue(partial_row_data.row_key is row_key)
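# PartialRowData stores cells keyed first by column family, then by column
# qualifier; to_dict(), as tested above, flattens this into b'family:qualifier'
# keys.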
class TestPartialRowsData(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.bigtable.row_data import PartialRowsData
return PartialRowsData
def _getDoNothingClass(self):
klass = self._getTargetClass()
class FakePartialRowsData(klass):
def __init__(self, *args, **kwargs):
super(FakePartialRowsData, self).__init__(*args, **kwargs)
self._consumed = []
def consume_next(self):
value = self._response_iterator.next()
self._consumed.append(value)
return value
return FakePartialRowsData
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
response_iterator = object()
partial_rows_data = self._makeOne(response_iterator)
self.assertTrue(partial_rows_data._response_iterator
is response_iterator)
self.assertEqual(partial_rows_data._rows, {})
def test___eq__(self):
response_iterator = object()
partial_rows_data1 = self._makeOne(response_iterator)
partial_rows_data2 = self._makeOne(response_iterator)
self.assertEqual(partial_rows_data1, partial_rows_data2)
def test___eq__type_differ(self):
partial_rows_data1 = self._makeOne(None)
partial_rows_data2 = object()
self.assertNotEqual(partial_rows_data1, partial_rows_data2)
def test___ne__same_value(self):
response_iterator = object()
partial_rows_data1 = self._makeOne(response_iterator)
partial_rows_data2 = self._makeOne(response_iterator)
comparison_val = (partial_rows_data1 != partial_rows_data2)
self.assertFalse(comparison_val)
def test___ne__(self):
response_iterator1 = object()
partial_rows_data1 = self._makeOne(response_iterator1)
response_iterator2 = object()
partial_rows_data2 = self._makeOne(response_iterator2)
self.assertNotEqual(partial_rows_data1, partial_rows_data2)
def test_state_start(self):
prd = self._makeOne([])
self.assertEqual(prd.state, prd.START)
def test_state_new_row_w_row(self):
prd = self._makeOne([])
prd._last_scanned_row_key = ''
prd._row = object()
self.assertEqual(prd.state, prd.NEW_ROW)
def test_rows_getter(self):
partial_rows_data = self._makeOne(None)
partial_rows_data._rows = value = object()
self.assertTrue(partial_rows_data.rows is value)
def test_cancel(self):
response_iterator = _MockCancellableIterator()
partial_rows_data = self._makeOne(response_iterator)
self.assertEqual(response_iterator.cancel_calls, 0)
partial_rows_data.cancel()
self.assertEqual(response_iterator.cancel_calls, 1)
# 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests'
def test_consume_all(self):
klass = self._getDoNothingClass()
value1, value2, value3 = object(), object(), object()
response_iterator = _MockCancellableIterator(value1, value2, value3)
partial_rows_data = klass(response_iterator)
self.assertEqual(partial_rows_data._consumed, [])
partial_rows_data.consume_all()
self.assertEqual(
partial_rows_data._consumed, [value1, value2, value3])
def test_consume_all_with_max_loops(self):
klass = self._getDoNothingClass()
value1, value2, value3 = object(), object(), object()
response_iterator = _MockCancellableIterator(value1, value2, value3)
partial_rows_data = klass(response_iterator)
self.assertEqual(partial_rows_data._consumed, [])
partial_rows_data.consume_all(max_loops=1)
self.assertEqual(partial_rows_data._consumed, [value1])
# Make sure the iterator still has the remaining values.
self.assertEqual(
list(response_iterator.iter_values), [value2, value3])
def test__copy_from_current_unset(self):
prd = self._makeOne([])
chunks = _generate_cell_chunks([''])
chunk = chunks[0]
prd._copy_from_current(chunk)
self.assertEqual(chunk.row_key, b'')
self.assertEqual(chunk.family_name.value, u'')
self.assertEqual(chunk.qualifier.value, b'')
self.assertEqual(chunk.timestamp_micros, 0)
self.assertEqual(chunk.labels, [])
def test__copy_from_current_blank(self):
ROW_KEY = b'RK'
FAMILY_NAME = u'A'
QUALIFIER = b'C'
TIMESTAMP_MICROS = 100
LABELS = ['L1', 'L2']
prd = self._makeOne([])
prd._cell = _PartialCellData()
chunks = _generate_cell_chunks([''])
chunk = chunks[0]
chunk.row_key = ROW_KEY
chunk.family_name.value = FAMILY_NAME
chunk.qualifier.value = QUALIFIER
chunk.timestamp_micros = TIMESTAMP_MICROS
chunk.labels.extend(LABELS)
prd._copy_from_current(chunk)
self.assertEqual(chunk.row_key, ROW_KEY)
self.assertEqual(chunk.family_name.value, FAMILY_NAME)
self.assertEqual(chunk.qualifier.value, QUALIFIER)
self.assertEqual(chunk.timestamp_micros, TIMESTAMP_MICROS)
self.assertEqual(chunk.labels, LABELS)
def test__copy_from_previous_unset(self):
prd = self._makeOne([])
cell = _PartialCellData()
prd._copy_from_previous(cell)
self.assertEqual(cell.row_key, '')
self.assertEqual(cell.family_name, u'')
self.assertEqual(cell.qualifier, b'')
self.assertEqual(cell.timestamp_micros, 0)
self.assertEqual(cell.labels, [])
def test__copy_from_previous_blank(self):
ROW_KEY = 'RK'
FAMILY_NAME = u'A'
QUALIFIER = b'C'
TIMESTAMP_MICROS = 100
LABELS = ['L1', 'L2']
prd = self._makeOne([])
cell = _PartialCellData(
row_key=ROW_KEY,
family_name=FAMILY_NAME,
qualifier=QUALIFIER,
timestamp_micros=TIMESTAMP_MICROS,
labels=LABELS,
)
prd._previous_cell = _PartialCellData()
prd._copy_from_previous(cell)
self.assertEqual(cell.row_key, ROW_KEY)
self.assertEqual(cell.family_name, FAMILY_NAME)
self.assertEqual(cell.qualifier, QUALIFIER)
self.assertEqual(cell.timestamp_micros, TIMESTAMP_MICROS)
self.assertEqual(cell.labels, LABELS)
def test__copy_from_previous_filled(self):
ROW_KEY = 'RK'
FAMILY_NAME = u'A'
QUALIFIER = b'C'
TIMESTAMP_MICROS = 100
LABELS = ['L1', 'L2']
prd = self._makeOne([])
prd._previous_cell = _PartialCellData(
row_key=ROW_KEY,
family_name=FAMILY_NAME,
qualifier=QUALIFIER,
timestamp_micros=TIMESTAMP_MICROS,
labels=LABELS,
)
cell = _PartialCellData()
prd._copy_from_previous(cell)
self.assertEqual(cell.row_key, ROW_KEY)
self.assertEqual(cell.family_name, FAMILY_NAME)
self.assertEqual(cell.qualifier, QUALIFIER)
self.assertEqual(cell.timestamp_micros, 0)
self.assertEqual(cell.labels, [])
def test__save_row_no_cell(self):
ROW_KEY = 'RK'
prd = self._makeOne([])
row = prd._row = _Dummy(row_key=ROW_KEY)
prd._cell = None
prd._save_current_row()
self.assertTrue(prd._rows[ROW_KEY] is row)
def test_invalid_last_scanned_row_key_on_start(self):
from gcloud.bigtable.row_data import InvalidReadRowsResponse
response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC')
iterator = _MockCancellableIterator(response)
prd = self._makeOne(iterator)
with self.assertRaises(InvalidReadRowsResponse):
prd.consume_next()
def test_valid_last_scanned_row_key_on_start(self):
response = _ReadRowsResponseV2(
chunks=(), last_scanned_row_key='AFTER')
iterator = _MockCancellableIterator(response)
prd = self._makeOne(iterator)
prd._last_scanned_row_key = 'BEFORE'
prd.consume_next()
self.assertEqual(prd._last_scanned_row_key, 'AFTER')
def test_invalid_empty_chunk(self):
from gcloud.bigtable.row_data import InvalidChunk
chunks = _generate_cell_chunks([''])
response = _ReadRowsResponseV2(chunks)
iterator = _MockCancellableIterator(response)
prd = self._makeOne(iterator)
with self.assertRaises(InvalidChunk):
prd.consume_next()
def test_invalid_empty_second_chunk(self):
from gcloud.bigtable.row_data import InvalidChunk
chunks = _generate_cell_chunks(['', ''])
first = chunks[0]
first.row_key = b'RK'
first.family_name.value = 'A'
first.qualifier.value = b'C'
response = _ReadRowsResponseV2(chunks)
iterator = _MockCancellableIterator(response)
prd = self._makeOne(iterator)
with self.assertRaises(InvalidChunk):
prd.consume_next()
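# The acceptance tests below replay the shared read-rows JSON testcases
# (loaded from 'read-rows-acceptance-test.json' next to this module) through
# PartialRowsData and compare the flattened cells against each testcase's
# expected results.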
class TestPartialRowsData_JSON_acceptance_tests(unittest2.TestCase):
_json_tests = None
def _getTargetClass(self):
from gcloud.bigtable.row_data import PartialRowsData
return PartialRowsData
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _load_json_test(self, test_name):
import os
if self.__class__._json_tests is None:
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'read-rows-acceptance-test.json')
raw = _parse_readrows_acceptance_tests(filename)
tests = self.__class__._json_tests = {}
for (name, chunks, results) in raw:
tests[name] = chunks, results
return self.__class__._json_tests[test_name]
# JSON Error cases: invalid chunks
def _fail_during_consume(self, testcase_name):
from gcloud.bigtable.row_data import InvalidChunk
chunks, results = self._load_json_test(testcase_name)
response = _ReadRowsResponseV2(chunks)
iterator = _MockCancellableIterator(response)
prd = self._makeOne(iterator)
with self.assertRaises(InvalidChunk):
prd.consume_next()
expected_result = self._sort_flattened_cells(
[result for result in results if not result['error']])
flattened = self._sort_flattened_cells(_flatten_cells(prd))
self.assertEqual(flattened, expected_result)
def test_invalid_no_cell_key_before_commit(self):
self._fail_during_consume('invalid - no cell key before commit')
def test_invalid_no_cell_key_before_value(self):
self._fail_during_consume('invalid - no cell key before value')
def test_invalid_new_col_family_wo_qualifier(self):
self._fail_during_consume(
'invalid - new col family must specify qualifier')
def test_invalid_no_commit_between_rows(self):
self._fail_during_consume('invalid - no commit between rows')
def test_invalid_no_commit_after_first_row(self):
self._fail_during_consume('invalid - no commit after first row')
def test_invalid_duplicate_row_key(self):
self._fail_during_consume('invalid - duplicate row key')
def test_invalid_new_row_missing_row_key(self):
self._fail_during_consume('invalid - new row missing row key')
def test_invalid_bare_reset(self):
self._fail_during_consume('invalid - bare reset')
def test_invalid_bad_reset_no_commit(self):
self._fail_during_consume('invalid - bad reset, no commit')
def test_invalid_missing_key_after_reset(self):
self._fail_during_consume('invalid - missing key after reset')
def test_invalid_reset_with_chunk(self):
self._fail_during_consume('invalid - reset with chunk')
def test_invalid_commit_with_chunk(self):
self._fail_during_consume('invalid - commit with chunk')
# JSON Error cases: incomplete final row
def _sort_flattened_cells(self, flattened):
import operator
key_func = operator.itemgetter('rk', 'fm', 'qual')
return sorted(flattened, key=key_func)
def _incomplete_final_row(self, testcase_name):
chunks, results = self._load_json_test(testcase_name)
response = _ReadRowsResponseV2(chunks)
iterator = _MockCancellableIterator(response)
prd = self._makeOne(iterator)
prd.consume_next()
self.assertEqual(prd.state, prd.ROW_IN_PROGRESS)
expected_result = self._sort_flattened_cells(
[result for result in results if not result['error']])
flattened = self._sort_flattened_cells(_flatten_cells(prd))
self.assertEqual(flattened, expected_result)
def test_invalid_no_commit(self):
self._incomplete_final_row('invalid - no commit')
def test_invalid_last_row_missing_commit(self):
self._incomplete_final_row('invalid - last row missing commit')
# Non-error cases
_marker = object()
def _match_results(self, testcase_name, expected_result=_marker):
chunks, results = self._load_json_test(testcase_name)
response = _ReadRowsResponseV2(chunks)
iterator = _MockCancellableIterator(response)
prd = self._makeOne(iterator)
prd.consume_next()
flattened = self._sort_flattened_cells(_flatten_cells(prd))
if expected_result is self._marker:
expected_result = self._sort_flattened_cells(results)
self.assertEqual(flattened, expected_result)
def test_bare_commit_implies_ts_zero(self):
self._match_results('bare commit implies ts=0')
def test_simple_row_with_timestamp(self):
self._match_results('simple row with timestamp')
def test_missing_timestamp_implies_ts_zero(self):
self._match_results('missing timestamp, implied ts=0')
def test_empty_cell_value(self):
self._match_results('empty cell value')
def test_two_unsplit_cells(self):
self._match_results('two unsplit cells')
def test_two_qualifiers(self):
self._match_results('two qualifiers')
def test_two_families(self):
self._match_results('two families')
def test_with_labels(self):
self._match_results('with labels')
def test_split_cell_bare_commit(self):
self._match_results('split cell, bare commit')
def test_split_cell(self):
self._match_results('split cell')
def test_split_four_ways(self):
self._match_results('split four ways')
def test_two_split_cells(self):
self._match_results('two split cells')
def test_multi_qualifier_splits(self):
self._match_results('multi-qualifier splits')
def test_multi_qualifier_multi_split(self):
self._match_results('multi-qualifier multi-split')
def test_multi_family_split(self):
self._match_results('multi-family split')
def test_two_rows(self):
self._match_results('two rows')
def test_two_rows_implicit_timestamp(self):
self._match_results('two rows implicit timestamp')
def test_two_rows_empty_value(self):
self._match_results('two rows empty value')
def test_two_rows_one_with_multiple_cells(self):
self._match_results('two rows, one with multiple cells')
def test_two_rows_multiple_cells_multiple_families(self):
self._match_results('two rows, multiple cells, multiple families')
def test_two_rows_multiple_cells(self):
self._match_results('two rows, multiple cells')
def test_two_rows_four_cells_two_labels(self):
self._match_results('two rows, four cells, 2 labels')
def test_two_rows_with_splits_same_timestamp(self):
self._match_results('two rows with splits, same timestamp')
def test_no_data_after_reset(self):
# JSON testcase has `"results": null`
self._match_results('no data after reset', expected_result=[])
def test_simple_reset(self):
self._match_results('simple reset')
def test_reset_to_new_val(self):
self._match_results('reset to new val')
def test_reset_to_new_qual(self):
self._match_results('reset to new qual')
def test_reset_with_splits(self):
self._match_results('reset with splits')
def test_two_resets(self):
self._match_results('two resets')
def test_reset_to_new_row(self):
self._match_results('reset to new row')
def test_reset_in_between_chunks(self):
self._match_results('reset in between chunks')
def test_empty_cell_chunk(self):
self._match_results('empty cell chunk')
def _flatten_cells(prd):
# Match results format from JSON testcases.
# Doesn't handle error cases.
from gcloud._helpers import _bytes_to_unicode
from gcloud._helpers import _microseconds_from_datetime
for row_key, row in prd.rows.items():
for family_name, family in row.cells.items():
for qualifier, column in family.items():
for cell in column:
yield {
u'rk': _bytes_to_unicode(row_key),
u'fm': family_name,
u'qual': _bytes_to_unicode(qualifier),
u'ts': _microseconds_from_datetime(cell.timestamp),
u'value': _bytes_to_unicode(cell.value),
u'label': u' '.join(cell.labels),
u'error': False,
}
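# Test doubles shared by the cases above: a cancellable stand-in for the
# gRPC response stream, plus minimal cell and ReadRowsResponse fakes.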
class _MockCancellableIterator(object):
cancel_calls = 0
def __init__(self, *values):
self.iter_values = iter(values)
def cancel(self):
self.cancel_calls += 1
def next(self):
return next(self.iter_values)
def __next__(self): # pragma: NO COVER Py3k
return self.next()
class _Dummy(object):
def __init__(self, **kw):
self.__dict__.update(kw)
class _PartialCellData(object):
row_key = ''
family_name = u''
qualifier = b''
timestamp_micros = 0
def __init__(self, **kw):
self.labels = kw.pop('labels', [])
self.__dict__.update(kw)
class _ReadRowsResponseV2(object):
def __init__(self, chunks, last_scanned_row_key=''):
self.chunks = chunks
self.last_scanned_row_key = last_scanned_row_key
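# Build ReadRowsResponse.CellChunk protobufs by merging text-format strings,
# letting testcases describe chunks declaratively.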
def _generate_cell_chunks(chunk_text_pbs):
from google.protobuf.text_format import Merge
from gcloud.bigtable._generated_v2.bigtable_pb2 import ReadRowsResponse
chunks = []
for chunk_text_pb in chunk_text_pbs:
chunk = ReadRowsResponse.CellChunk()
chunks.append(Merge(chunk_text_pb, chunk))
return chunks
def _parse_readrows_acceptance_tests(filename):
"""Parse acceptance tests from JSON
See:
https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/master/bigtable-client-core/src/test/resources/com/google/cloud/bigtable/grpc/scanner/v2/read-rows-acceptance-test.json
"""
import json
with open(filename) as json_file:
test_json = json.load(json_file)
for test in test_json['tests']:
name = test['name']
chunks = _generate_cell_chunks(test['chunks'])
results = test['results']
yield name, chunks, results

File diff suppressed because it is too large

View file

@ -0,0 +1,565 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestTable(unittest2.TestCase):
PROJECT_ID = 'project-id'
INSTANCE_ID = 'instance-id'
INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID)
TABLE_ID = 'table-id'
TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID
TIMEOUT_SECONDS = 1333
ROW_KEY = b'row-key'
FAMILY_NAME = u'family'
QUALIFIER = b'qualifier'
TIMESTAMP_MICROS = 100
VALUE = b'value'
def _getTargetClass(self):
from gcloud.bigtable.table import Table
return Table
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
table_id = 'table-id'
instance = object()
table = self._makeOne(table_id, instance)
self.assertEqual(table.table_id, table_id)
self.assertTrue(table._instance is instance)
def test_name_property(self):
table_id = 'table-id'
instance_name = 'instance_name'
instance = _Instance(instance_name)
table = self._makeOne(table_id, instance)
expected_name = instance_name + '/tables/' + table_id
self.assertEqual(table.name, expected_name)
def test_column_family_factory(self):
from gcloud.bigtable.column_family import ColumnFamily
table_id = 'table-id'
gc_rule = object()
table = self._makeOne(table_id, None)
column_family_id = 'column_family_id'
column_family = table.column_family(column_family_id, gc_rule=gc_rule)
self.assertTrue(isinstance(column_family, ColumnFamily))
self.assertEqual(column_family.column_family_id, column_family_id)
self.assertTrue(column_family.gc_rule is gc_rule)
self.assertEqual(column_family._table, table)
def test_row_factory_direct(self):
from gcloud.bigtable.row import DirectRow
table_id = 'table-id'
table = self._makeOne(table_id, None)
row_key = b'row_key'
row = table.row(row_key)
self.assertTrue(isinstance(row, DirectRow))
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_conditional(self):
from gcloud.bigtable.row import ConditionalRow
table_id = 'table-id'
table = self._makeOne(table_id, None)
row_key = b'row_key'
filter_ = object()
row = table.row(row_key, filter_=filter_)
self.assertTrue(isinstance(row, ConditionalRow))
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_append(self):
from gcloud.bigtable.row import AppendRow
table_id = 'table-id'
table = self._makeOne(table_id, None)
row_key = b'row_key'
row = table.row(row_key, append=True)
self.assertTrue(isinstance(row, AppendRow))
self.assertEqual(row._row_key, row_key)
self.assertEqual(row._table, table)
def test_row_factory_failure(self):
table = self._makeOne(self.TABLE_ID, None)
with self.assertRaises(ValueError):
table.row(b'row_key', filter_=object(), append=True)
def test___eq__(self):
instance = object()
table1 = self._makeOne(self.TABLE_ID, instance)
table2 = self._makeOne(self.TABLE_ID, instance)
self.assertEqual(table1, table2)
def test___eq__type_differ(self):
table1 = self._makeOne(self.TABLE_ID, None)
table2 = object()
self.assertNotEqual(table1, table2)
def test___ne__same_value(self):
instance = object()
table1 = self._makeOne(self.TABLE_ID, instance)
table2 = self._makeOne(self.TABLE_ID, instance)
comparison_val = (table1 != table2)
self.assertFalse(comparison_val)
def test___ne__(self):
table1 = self._makeOne('table_id1', 'instance1')
table2 = self._makeOne('table_id2', 'instance2')
self.assertNotEqual(table1, table2)
def _create_test_helper(self, initial_split_keys):
from gcloud._helpers import _to_bytes
from gcloud.bigtable._testing import _FakeStub
client = _Client(timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_NAME, client=client)
table = self._makeOne(self.TABLE_ID, instance)
# Create request_pb
splits_pb = [
_CreateTableRequestSplitPB(key=_to_bytes(key))
for key in initial_split_keys or ()]
request_pb = _CreateTableRequestPB(
initial_splits=splits_pb,
parent=self.INSTANCE_NAME,
table_id=self.TABLE_ID,
)
# Create response_pb
response_pb = _TablePB()
# Patch the stub used by the API method.
client._table_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # create() has no return value.
# Perform the method and check the result.
result = table.create(initial_split_keys=initial_split_keys)
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'CreateTable',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
def test_create(self):
initial_split_keys = None
self._create_test_helper(initial_split_keys)
def test_create_with_split_keys(self):
initial_split_keys = [b's1', b's2']
self._create_test_helper(initial_split_keys)
def _list_column_families_helper(self):
from gcloud.bigtable._testing import _FakeStub
client = _Client(timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_NAME, client=client)
table = self._makeOne(self.TABLE_ID, instance)
# Create request_pb
request_pb = _GetTableRequestPB(name=self.TABLE_NAME)
# Create response_pb
COLUMN_FAMILY_ID = 'foo'
column_family = _ColumnFamilyPB()
response_pb = _TablePB(
column_families={COLUMN_FAMILY_ID: column_family},
)
# Patch the stub used by the API method.
client._table_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = {
COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID),
}
# Perform the method and check the result.
result = table.list_column_families()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'GetTable',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
def test_list_column_families(self):
self._list_column_families_helper()
def test_delete(self):
from google.protobuf import empty_pb2
from gcloud.bigtable._testing import _FakeStub
client = _Client(timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_NAME, client=client)
table = self._makeOne(self.TABLE_ID, instance)
# Create request_pb
request_pb = _DeleteTableRequestPB(name=self.TABLE_NAME)
# Create response_pb
response_pb = empty_pb2.Empty()
# Patch the stub used by the API method.
client._table_stub = stub = _FakeStub(response_pb)
# Create expected_result.
expected_result = None # delete() has no return value.
# Perform the method and check the result.
result = table.delete()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'DeleteTable',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
def _read_row_helper(self, chunks, expected_result):
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable import table as MUT
client = _Client(timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_NAME, client=client)
table = self._makeOne(self.TABLE_ID, instance)
# Create request_pb
request_pb = object() # Returned by our mock.
mock_created = []
def mock_create_row_request(table_name, row_key, filter_):
mock_created.append((table_name, row_key, filter_))
return request_pb
# Create response_iterator
if chunks is None:
response_iterator = iter(()) # no responses at all
else:
response_pb = _ReadRowsResponsePB(chunks=chunks)
response_iterator = iter([response_pb])
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_iterator)
# Perform the method and check the result.
filter_obj = object()
with _Monkey(MUT, _create_row_request=mock_create_row_request):
result = table.read_row(self.ROW_KEY, filter_=filter_obj)
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ReadRows',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
self.assertEqual(mock_created,
[(table.name, self.ROW_KEY, filter_obj)])
def test_read_row_miss_no_responses(self):
self._read_row_helper(None, None)
def test_read_row_miss_no_chunks_in_response(self):
chunks = []
self._read_row_helper(chunks, None)
def test_read_row_complete(self):
from gcloud.bigtable.row_data import Cell
from gcloud.bigtable.row_data import PartialRowData
chunk = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
commit_row=True,
)
chunks = [chunk]
expected_result = PartialRowData(row_key=self.ROW_KEY)
family = expected_result._cells.setdefault(self.FAMILY_NAME, {})
column = family.setdefault(self.QUALIFIER, [])
column.append(Cell.from_pb(chunk))
self._read_row_helper(chunks, expected_result)
def test_read_row_still_partial(self):
chunk = _ReadRowsResponseCellChunkPB(
row_key=self.ROW_KEY,
family_name=self.FAMILY_NAME,
qualifier=self.QUALIFIER,
timestamp_micros=self.TIMESTAMP_MICROS,
value=self.VALUE,
)
# No "commit row".
chunks = [chunk]
with self.assertRaises(ValueError):
self._read_row_helper(chunks, None)
def test_read_rows(self):
from gcloud._testing import _Monkey
from gcloud.bigtable._testing import _FakeStub
from gcloud.bigtable.row_data import PartialRowsData
from gcloud.bigtable import table as MUT
client = _Client(timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_NAME, client=client)
table = self._makeOne(self.TABLE_ID, instance)
# Create request_pb
request_pb = object() # Returned by our mock.
mock_created = []
def mock_create_row_request(table_name, **kwargs):
mock_created.append((table_name, kwargs))
return request_pb
# Create response_iterator
response_iterator = object()
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_iterator)
# Create expected_result.
expected_result = PartialRowsData(response_iterator)
# Perform the method and check the result.
start_key = b'start-key'
end_key = b'end-key'
filter_obj = object()
limit = 22
with _Monkey(MUT, _create_row_request=mock_create_row_request):
result = table.read_rows(
start_key=start_key, end_key=end_key, filter_=filter_obj,
limit=limit)
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ReadRows',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
created_kwargs = {
'start_key': start_key,
'end_key': end_key,
'filter_': filter_obj,
'limit': limit,
}
self.assertEqual(mock_created, [(table.name, created_kwargs)])
def test_sample_row_keys(self):
from gcloud.bigtable._testing import _FakeStub
client = _Client(timeout_seconds=self.TIMEOUT_SECONDS)
instance = _Instance(self.INSTANCE_NAME, client=client)
table = self._makeOne(self.TABLE_ID, instance)
# Create request_pb
request_pb = _SampleRowKeysRequestPB(table_name=self.TABLE_NAME)
# Create response_iterator
response_iterator = object() # Just passed to a mock.
# Patch the stub used by the API method.
client._data_stub = stub = _FakeStub(response_iterator)
# Create expected_result.
expected_result = response_iterator
# Perform the method and check the result.
result = table.sample_row_keys()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'SampleRowKeys',
(request_pb, self.TIMEOUT_SECONDS),
{},
)])
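# Tests for the module-level request builder that Table.read_row() and
# Table.read_rows() delegate to (patched via _Monkey in the tests above).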
class Test__create_row_request(unittest2.TestCase):
def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None,
filter_=None, limit=None):
from gcloud.bigtable.table import _create_row_request
return _create_row_request(
table_name, row_key=row_key, start_key=start_key, end_key=end_key,
filter_=filter_, limit=limit)
def test_table_name_only(self):
table_name = 'table_name'
result = self._callFUT(table_name)
expected_result = _ReadRowsRequestPB(
table_name=table_name)
self.assertEqual(result, expected_result)
def test_row_key_row_range_conflict(self):
with self.assertRaises(ValueError):
self._callFUT(None, row_key=object(), end_key=object())
def test_row_key(self):
table_name = 'table_name'
row_key = b'row_key'
result = self._callFUT(table_name, row_key=row_key)
expected_result = _ReadRowsRequestPB(
table_name=table_name,
)
expected_result.rows.row_keys.append(row_key)
self.assertEqual(result, expected_result)
def test_row_range_start_key(self):
table_name = 'table_name'
start_key = b'start_key'
result = self._callFUT(table_name, start_key=start_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(start_key_closed=start_key)
self.assertEqual(result, expected_result)
def test_row_range_end_key(self):
table_name = 'table_name'
end_key = b'end_key'
result = self._callFUT(table_name, end_key=end_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(end_key_open=end_key)
self.assertEqual(result, expected_result)
def test_row_range_both_keys(self):
table_name = 'table_name'
start_key = b'start_key'
end_key = b'end_key'
result = self._callFUT(table_name, start_key=start_key,
end_key=end_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(
start_key_closed=start_key, end_key_open=end_key)
self.assertEqual(result, expected_result)
def test_with_filter(self):
from gcloud.bigtable.row_filters import RowSampleFilter
table_name = 'table_name'
row_filter = RowSampleFilter(0.33)
result = self._callFUT(table_name, filter_=row_filter)
expected_result = _ReadRowsRequestPB(
table_name=table_name,
filter=row_filter.to_pb(),
)
self.assertEqual(result, expected_result)
def test_with_limit(self):
table_name = 'table_name'
limit = 1337
result = self._callFUT(table_name, limit=limit)
expected_result = _ReadRowsRequestPB(
table_name=table_name,
rows_limit=limit,
)
self.assertEqual(result, expected_result)
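# Thin factories that defer the generated-protobuf imports until a message
# is actually constructed.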
def _CreateTableRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
return table_admin_v2_pb2.CreateTableRequest(*args, **kw)
def _CreateTableRequestSplitPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
return table_admin_v2_pb2.CreateTableRequest.Split(*args, **kw)
def _DeleteTableRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
return table_admin_v2_pb2.DeleteTableRequest(*args, **kw)
def _GetTableRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_table_admin_pb2 as table_admin_v2_pb2)
return table_admin_v2_pb2.GetTableRequest(*args, **kw)
def _ReadRowsRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.ReadRowsRequest(*args, **kw)
def _ReadRowsResponseCellChunkPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
family_name = kw.pop('family_name')
qualifier = kw.pop('qualifier')
message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw)
message.family_name.value = family_name
message.qualifier.value = qualifier
return message
def _ReadRowsResponsePB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.ReadRowsResponse(*args, **kw)
def _SampleRowKeysRequestPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
bigtable_pb2 as messages_v2_pb2)
return messages_v2_pb2.SampleRowKeysRequest(*args, **kw)
def _TablePB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_v2_pb2)
return table_v2_pb2.Table(*args, **kw)
def _ColumnFamilyPB(*args, **kw):
from gcloud.bigtable._generated_v2 import (
table_pb2 as table_v2_pb2)
return table_v2_pb2.ColumnFamily(*args, **kw)
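# Minimal client / instance stand-ins: the table tests only need the client's
# timeout and patched stubs, and the instance's name.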
class _Client(object):
data_stub = None
instance_stub = None
operations_stub = None
table_stub = None
def __init__(self, timeout_seconds=None):
self.timeout_seconds = timeout_seconds
class _Instance(object):
def __init__(self, name, client=None):
self.name = name
self._client = client