Updated DB_Helper by adding firebase methods.

This commit is contained in:
Batuhan Berk Başoğlu 2020-10-05 16:53:40 -04:00
parent 485cc3bbba
commit c82121d036
1810 changed files with 537281 additions and 1 deletion

View file

@@ -0,0 +1,15 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated protobuf modules for Google Cloud Bigtable API."""

View file

@@ -0,0 +1,93 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.cluster.v1;
import "google/api/annotations.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/timestamp.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterDataProto";
option java_package = "com.google.bigtable.admin.cluster.v1";
// A physical location in which a particular project can allocate Cloud Bigtable
// resources.
message Zone {
// Possible states of a zone.
enum Status {
// The state of the zone is unknown or unspecified.
UNKNOWN = 0;
// The zone is in a good state.
OK = 1;
// The zone is down for planned maintenance.
PLANNED_MAINTENANCE = 2;
// The zone is down for emergency or unplanned maintenance.
EMERGENCY_MAINENANCE = 3;
}
// A permanent unique identifier for the zone.
// Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*
string name = 1;
// The name of this zone as it appears in UIs.
string display_name = 2;
// The current state of this zone.
Status status = 3;
}
// An isolated set of Cloud Bigtable resources on which tables can be hosted.
message Cluster {
// A permanent unique identifier for the cluster. For technical reasons, the
// zone in which the cluster resides is included here.
// Values are of the form
// projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*
string name = 1;
// The operation currently running on the cluster, if any.
// This cannot be set directly, only through CreateCluster, UpdateCluster,
// or UndeleteCluster. Calls to these methods will be rejected if
// "current_operation" is already set.
google.longrunning.Operation current_operation = 3;
// The descriptive name for this cluster as it appears in UIs.
// Must be unique per zone.
string display_name = 4;
// The number of serve nodes allocated to this cluster.
int32 serve_nodes = 5;
// What storage type to use for tables in this cluster. Only configurable at
// cluster creation time. If unspecified, STORAGE_SSD will be used.
StorageType default_storage_type = 8;
}
enum StorageType {
// The storage type used is unspecified.
STORAGE_UNSPECIFIED = 0;
// Data will be stored in SSD, providing low and consistent latencies.
STORAGE_SSD = 1;
// Data will be stored in HDD, providing high and less predictable
// latencies.
STORAGE_HDD = 2;
}

View file

@@ -0,0 +1,129 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.cluster.v1;
import "google/api/annotations.proto";
import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/empty.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterServicesProto";
option java_package = "com.google.bigtable.admin.cluster.v1";
// Service for managing zonal Cloud Bigtable resources.
service BigtableClusterService {
// Lists the supported zones for the given project.
rpc ListZones(ListZonesRequest) returns (ListZonesResponse) {
option (google.api.http) = { get: "/v1/{name=projects/*}/zones" };
}
// Gets information about a particular cluster.
rpc GetCluster(GetClusterRequest) returns (Cluster) {
option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" };
}
// Lists all clusters in the given project, along with any zones for which
// cluster information could not be retrieved.
rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" };
}
// Creates a cluster and begins preparing it to serve. The returned
// cluster embeds as its "current_operation" a long-running operation which
// can be used to track the progress of turning up the new cluster.
// Immediately upon completion of this request:
// * The cluster will be readable via the API, with all requested attributes
// but no allocated resources.
// Until completion of the embedded operation:
// * Cancelling the operation will render the cluster immediately unreadable
// via the API.
// * All other attempts to modify or delete the cluster will be rejected.
// Upon completion of the embedded operation:
// * Billing for all successfully-allocated resources will begin (some types
// may be allocated at lower than the requested levels).
// * New tables can be created in the cluster.
// * The cluster's allocated resource levels will be readable via the API.
// The embedded operation's "metadata" field type is
// [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata]. The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
rpc CreateCluster(CreateClusterRequest) returns (Cluster) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" };
}
// Updates a cluster, and begins allocating or releasing resources as
// requested. The returned cluster embeds as its "current_operation" a
// long-running operation which can be used to track the progress of updating
// the cluster.
// Immediately upon completion of this request:
// * For resource types where a decrease in the cluster's allocation has been
// requested, billing will be based on the newly-requested level.
// Until completion of the embedded operation:
// * Cancelling the operation will set its metadata's "cancelled_at_time",
// and begin restoring resources to their pre-request values. The operation
// is guaranteed to succeed at undoing all resource changes, after which
// point it will terminate with a CANCELLED status.
// * All other attempts to modify or delete the cluster will be rejected.
// * Reading the cluster via the API will continue to give the pre-request
// resource levels.
// Upon completion of the embedded operation:
// * Billing will begin for all successfully-allocated resources (some types
// may be allocated at lower than the requested levels).
// * All newly-reserved resources will be available for serving the cluster's
// tables.
// * The cluster's new resource levels will be readable via the API.
// The embedded operation's "metadata" field type is
// [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata]. The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
rpc UpdateCluster(Cluster) returns (Cluster) {
option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" };
}
// Marks a cluster and all of its tables for permanent deletion in 7 days.
// Immediately upon completion of the request:
// * Billing will cease for all of the cluster's reserved resources.
// * The cluster's "delete_time" field will be set 7 days in the future.
// Soon afterward:
// * All tables within the cluster will become unavailable.
// Prior to the cluster's "delete_time":
// * The cluster can be recovered with a call to UndeleteCluster.
// * All other attempts to modify or delete the cluster will be rejected.
// At the cluster's "delete_time":
// * The cluster and *all of its tables* will immediately and irrevocably
// disappear from the API, and their data will be permanently deleted.
rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" };
}
// Cancels the scheduled deletion of a cluster and begins preparing it to
// resume serving. The returned operation will also be embedded as the
// cluster's "current_operation".
// Immediately upon completion of this request:
// * The cluster's "delete_time" field will be unset, protecting it from
// automatic deletion.
// Until completion of the returned operation:
// * The operation cannot be cancelled.
// Upon completion of the returned operation:
// * Billing for the cluster's resources will resume.
// * All tables within the cluster will be available.
// The embedded operation's "metadata" field type is
// [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata]. The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "null" };
}
}
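
A minimal Python sketch of the polling pattern described in the comments above (illustration only, not part of the vendored proto): it assumes a caller-supplied get_cluster callable standing in for BigtableClusterService.GetCluster, and treats the embedded current_operation's "done" flag as the completion signal.

import time

def wait_for_cluster(get_cluster, name, poll_seconds=5.0, timeout=600.0):
    """Poll until the cluster's embedded long-running operation finishes."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        cluster = get_cluster(name)  # hypothetical stand-in for GetCluster
        op = getattr(cluster, "current_operation", None)
        # No current_operation means no create/update/undelete is in flight.
        if op is None or getattr(op, "done", False):
            return cluster
        time.sleep(poll_seconds)
    raise TimeoutError(f"operation on {name!r} did not finish within {timeout}s")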

View file

@@ -0,0 +1,134 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.cluster.v1;
import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
import "google/protobuf/timestamp.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterServiceMessagesProto";
option java_package = "com.google.bigtable.admin.cluster.v1";
// Request message for BigtableClusterService.ListZones.
message ListZonesRequest {
// The unique name of the project for which a list of supported zones is
// requested.
// Values are of the form projects/<project>
string name = 1;
}
// Response message for BigtableClusterService.ListZones.
message ListZonesResponse {
// The list of requested zones.
repeated Zone zones = 1;
}
// Request message for BigtableClusterService.GetCluster.
message GetClusterRequest {
// The unique name of the requested cluster.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
string name = 1;
}
// Request message for BigtableClusterService.ListClusters.
message ListClustersRequest {
// The unique name of the project for which a list of clusters is requested.
// Values are of the form projects/<project>
string name = 1;
}
// Response message for BigtableClusterService.ListClusters.
message ListClustersResponse {
// The list of requested Clusters.
repeated Cluster clusters = 1;
// The zones for which clusters could not be retrieved.
repeated Zone failed_zones = 2;
}
// Request message for BigtableClusterService.CreateCluster.
message CreateClusterRequest {
// The unique name of the zone in which to create the cluster.
// Values are of the form projects/<project>/zones/<zone>
string name = 1;
// The id to be used when referring to the new cluster within its zone,
// e.g. just the "test-cluster" section of the full name
// "projects/<project>/zones/<zone>/clusters/test-cluster".
string cluster_id = 2;
// The cluster to create.
// The "name", "delete_time", and "current_operation" fields must be left
// blank.
Cluster cluster = 3;
}
// Metadata type for the operation returned by
// BigtableClusterService.CreateCluster.
message CreateClusterMetadata {
// The request which prompted the creation of this operation.
CreateClusterRequest original_request = 1;
// The time at which original_request was received.
google.protobuf.Timestamp request_time = 2;
// The time at which this operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 3;
}
// Metadata type for the operation returned by
// BigtableClusterService.UpdateCluster.
message UpdateClusterMetadata {
// The request which prompted the creation of this operation.
Cluster original_request = 1;
// The time at which original_request was received.
google.protobuf.Timestamp request_time = 2;
// The time at which this operation was cancelled. If set, this operation is
// in the process of undoing itself (which is guaranteed to succeed) and
// cannot be cancelled again.
google.protobuf.Timestamp cancel_time = 3;
// The time at which this operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 4;
}
// Request message for BigtableClusterService.DeleteCluster.
message DeleteClusterRequest {
// The unique name of the cluster to be deleted.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
string name = 1;
}
// Request message for BigtableClusterService.UndeleteCluster.
message UndeleteClusterRequest {
// The unique name of the cluster to be un-deleted.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
string name = 1;
}
// Metadata type for the operation returned by
// BigtableClusterService.UndeleteCluster.
message UndeleteClusterMetadata {
// The time at which the original request was received.
google.protobuf.Timestamp request_time = 1;
// The time at which this operation failed or was completed successfully.
google.protobuf.Timestamp finish_time = 2;
}

View file

@@ -0,0 +1,515 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v1;
option java_multiple_files = true;
option java_outer_classname = "BigtableDataProto";
option java_package = "com.google.bigtable.v1";
// Specifies the complete (requested) contents of a single row of a table.
// Rows which exceed 256MiB in size cannot be read in full.
message Row {
// The unique key which identifies this row within its table. This is the same
// key that's used to identify the row in, for example, a MutateRowRequest.
// May contain any non-empty byte string up to 4KiB in length.
bytes key = 1;
// May be empty, but only if the entire row is empty.
// The mutual ordering of column families is not specified.
repeated Family families = 2;
}
// Specifies (some of) the contents of a single row/column family of a table.
message Family {
// The unique key which identifies this family within its row. This is the
// same key that's used to identify the family in, for example, a RowFilter
// which sets its "family_name_regex_filter" field.
// Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may
// produce cells in a sentinel family with an empty name.
// Must be no greater than 64 characters in length.
string name = 1;
// Must not be empty. Sorted in order of increasing "qualifier".
repeated Column columns = 2;
}
// Specifies (some of) the contents of a single row/column of a table.
message Column {
// The unique key which identifies this column within its family. This is the
// same key that's used to identify the column in, for example, a RowFilter
// which sets its "column_qualifier_regex_filter" field.
// May contain any byte string, including the empty string, up to 16KiB in
// length.
bytes qualifier = 1;
// Must not be empty. Sorted in order of decreasing "timestamp_micros".
repeated Cell cells = 2;
}
// Specifies (some of) the contents of a single row/column/timestamp of a table.
message Cell {
// The cell's stored timestamp, which also uniquely identifies it within
// its column.
// Values are always expressed in microseconds, but individual tables may set
// a coarser "granularity" to further restrict the allowed values. For
// example, a table which specifies millisecond granularity will only allow
// values of "timestamp_micros" which are multiples of 1000.
int64 timestamp_micros = 1;
// The value stored in the cell.
// May contain any byte string, including the empty string, up to 100MiB in
// length.
bytes value = 2;
// Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
repeated string labels = 3;
}
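
A small Python check (illustration only) of the granularity rule noted above: a table declared with millisecond granularity only accepts timestamp_micros values that are multiples of 1000.

def valid_for_millis_granularity(timestamp_micros: int) -> bool:
    # MILLIS granularity: micros must land exactly on a millisecond boundary.
    return timestamp_micros % 1000 == 0

assert valid_for_millis_granularity(1_700_000_000_000_000)
assert not valid_for_millis_granularity(1_700_000_000_000_001)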
// Specifies a contiguous range of rows.
message RowRange {
// Inclusive lower bound. If left empty, interpreted as the empty string.
bytes start_key = 2;
// Exclusive upper bound. If left empty, interpreted as infinity.
bytes end_key = 3;
}
// Specifies a non-contiguous set of rows.
message RowSet {
// Single rows included in the set.
repeated bytes row_keys = 1;
// Contiguous row ranges included in the set.
repeated RowRange row_ranges = 2;
}
// Specifies a contiguous range of columns within a single column family.
// The range spans from <column_family>:<start_qualifier> to
// <column_family>:<end_qualifier>, where both bounds can be either inclusive or
// exclusive.
message ColumnRange {
// The name of the column family within which this range falls.
string family_name = 1;
// The column qualifier at which to start the range (within 'column_family').
// If neither field is set, interpreted as the empty string, inclusive.
oneof start_qualifier {
// Used when giving an inclusive lower bound for the range.
bytes start_qualifier_inclusive = 2;
// Used when giving an exclusive lower bound for the range.
bytes start_qualifier_exclusive = 3;
}
// The column qualifier at which to end the range (within 'column_family').
// If neither field is set, interpreted as the infinite string, exclusive.
oneof end_qualifier {
// Used when giving an inclusive upper bound for the range.
bytes end_qualifier_inclusive = 4;
// Used when giving an exclusive upper bound for the range.
bytes end_qualifier_exclusive = 5;
}
}
// Specifies a contiguous range of microsecond timestamps.
message TimestampRange {
// Inclusive lower bound. If left empty, interpreted as 0.
int64 start_timestamp_micros = 1;
// Exclusive upper bound. If left empty, interpreted as infinity.
int64 end_timestamp_micros = 2;
}
// Specifies a contiguous range of raw byte values.
message ValueRange {
// The value at which to start the range.
// If neither field is set, interpreted as the empty string, inclusive.
oneof start_value {
// Used when giving an inclusive lower bound for the range.
bytes start_value_inclusive = 1;
// Used when giving an exclusive lower bound for the range.
bytes start_value_exclusive = 2;
}
// The value at which to end the range.
// If neither field is set, interpreted as the infinite string, exclusive.
oneof end_value {
// Used when giving an inclusive upper bound for the range.
bytes end_value_inclusive = 3;
// Used when giving an exclusive upper bound for the range.
bytes end_value_exclusive = 4;
}
}
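
The four oneof fields above encode an optional lower bound and an optional upper bound, each either inclusive or exclusive. A plain-Python membership test (illustration only; the same pattern applies to ColumnRange) makes the semantics concrete:

def value_in_range(value, start_inclusive=None, start_exclusive=None,
                   end_inclusive=None, end_exclusive=None):
    # Unset lower bound = empty string, inclusive; unset upper bound = infinity.
    if start_inclusive is not None and value < start_inclusive:
        return False
    if start_exclusive is not None and value <= start_exclusive:
        return False
    if end_inclusive is not None and value > end_inclusive:
        return False
    if end_exclusive is not None and value >= end_exclusive:
        return False
    return True

# Half-open range [b"apple", b"banana"):
assert value_in_range(b"apple", start_inclusive=b"apple", end_exclusive=b"banana")
assert not value_in_range(b"banana", start_inclusive=b"apple", end_exclusive=b"banana")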
// Takes a row as input and produces an alternate view of the row based on
// specified rules. For example, a RowFilter might trim down a row to include
// just the cells from columns matching a given regular expression, or might
// return all the cells of a row but not their values. More complicated filters
// can be composed out of these components to express requests such as, "within
// every column of a particular family, give just the two most recent cells
// which are older than timestamp X."
//
// There are two broad categories of RowFilters (true filters and transformers),
// as well as two ways to compose simple filters into more complex ones
// (chains and interleaves). They work as follows:
//
// * True filters alter the input row by excluding some of its cells wholesale
// from the output row. An example of a true filter is the "value_regex_filter",
// which excludes cells whose values don't match the specified pattern. All
// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
// important point to keep in mind is that RE2(.) is equivalent by default to
// RE2([^\n]), meaning that it does not match newlines. When attempting to match
// an arbitrary byte, you should therefore use the escape sequence '\C', which
// may need to be further escaped as '\\C' in your client language.
//
// * Transformers alter the input row by changing the values of some of its
// cells in the output, without excluding them completely. Currently, the only
// supported transformer is the "strip_value_transformer", which replaces every
// cell's value with the empty string.
//
// * Chains and interleaves are described in more detail in the
// RowFilter.Chain and RowFilter.Interleave documentation.
//
// The total serialized size of a RowFilter message must not
// exceed 4096 bytes, and RowFilters may not be nested within each other
// (in Chains or Interleaves) to a depth of more than 20.
message RowFilter {
// A RowFilter which sends rows through several RowFilters in sequence.
message Chain {
// The elements of "filters" are chained together to process the input row:
// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
// The full chain is executed atomically.
repeated RowFilter filters = 1;
}
// A RowFilter which sends each row to each of several component
// RowFilters and interleaves the results.
message Interleave {
// The elements of "filters" all process a copy of the input row, and the
// results are pooled, sorted, and combined into a single output row.
// If multiple cells are produced with the same column and timestamp,
// they will all appear in the output row in an unspecified mutual order.
// Consider the following example, with three filters:
//
// input row
// |
// -----------------------------------------------------
// | | |
// f(0) f(1) f(2)
// | | |
// 1: foo,bar,10,x foo,bar,10,z far,bar,7,a
// 2: foo,blah,11,z far,blah,5,x far,blah,5,x
// | | |
// -----------------------------------------------------
// |
// 1: foo,bar,10,z // could have switched with #2
// 2: foo,bar,10,x // could have switched with #1
// 3: foo,blah,11,z
// 4: far,bar,7,a
// 5: far,blah,5,x // identical to #6
// 6: far,blah,5,x // identical to #5
// All interleaved filters are executed atomically.
repeated RowFilter filters = 1;
}
// A RowFilter which evaluates one of two possible RowFilters, depending on
// whether or not a predicate RowFilter outputs any cells from the input row.
//
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
// true and false filters, which may lead to inconsistent or unexpected
// results. Additionally, Condition filters have poor performance, especially
// when filters are set for the false condition.
message Condition {
// If "predicate_filter" outputs any cells, then "true_filter" will be
// evaluated on the input row. Otherwise, "false_filter" will be evaluated.
RowFilter predicate_filter = 1;
// The filter to apply to the input row if "predicate_filter" returns any
// results. If not provided, no results will be returned in the true case.
RowFilter true_filter = 2;
// The filter to apply to the input row if "predicate_filter" does not
// return any results. If not provided, no results will be returned in the
// false case.
RowFilter false_filter = 3;
}
// Which of the possible RowFilter types to apply. If none are set, this
// RowFilter returns all cells in the input row.
oneof filter {
// Applies several RowFilters to the data in sequence, progressively
// narrowing the results.
Chain chain = 1;
// Applies several RowFilters to the data in parallel and combines the
// results.
Interleave interleave = 2;
// Applies one of two possible RowFilters to the data based on the output of
// a predicate RowFilter.
Condition condition = 3;
// ADVANCED USE ONLY.
// Hook for introspection into the RowFilter. Outputs all cells directly to
// the output of the read rather than to any parent filter. Consider the
// following example:
//
// Chain(
// FamilyRegex("A"),
// Interleave(
// All(),
// Chain(Label("foo"), Sink())
// ),
// QualifierRegex("B")
// )
//
// A,A,1,w
// A,B,2,x
// B,B,4,z
// |
// FamilyRegex("A")
// |
// A,A,1,w
// A,B,2,x
// |
// +------------+-------------+
// | |
// All() Label(foo)
// | |
// A,A,1,w A,A,1,w,labels:[foo]
// A,B,2,x A,B,2,x,labels:[foo]
// | |
// | Sink() --------------+
// | | |
// +------------+ x------+ A,A,1,w,labels:[foo]
// | A,B,2,x,labels:[foo]
// A,A,1,w |
// A,B,2,x |
// | |
// QualifierRegex("B") |
// | |
// A,B,2,x |
// | |
// +--------------------------------+
// |
// A,A,1,w,labels:[foo]
// A,B,2,x,labels:[foo] // could be switched
// A,B,2,x // could be switched
//
// Despite being excluded by the qualifier filter, a copy of every cell
// that reaches the sink is present in the final result.
//
// As with an [Interleave][google.bigtable.v1.RowFilter.Interleave],
// duplicate cells are possible, and appear in an unspecified mutual order.
// In this case we have a duplicate with column "A:B" and timestamp 2,
// because one copy passed through the all filter while the other was
// passed through the label and sink. Note that one copy has label "foo",
// while the other does not.
//
// Cannot be used within the `predicate_filter`, `true_filter`, or
// `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition].
bool sink = 16;
// Matches all cells, regardless of input. Functionally equivalent to
// leaving `filter` unset, but included for completeness.
bool pass_all_filter = 17;
// Does not match any cells, regardless of input. Useful for temporarily
// disabling just part of a filter.
bool block_all_filter = 18;
// Matches only cells from rows whose keys satisfy the given RE2 regex. In
// other words, passes through the entire row when the key matches, and
// otherwise produces an empty row.
// Note that, since row keys can contain arbitrary bytes, the '\C' escape
// sequence must be used if a true wildcard is desired. The '.' character
// will not match the new line character '\n', which may be present in a
// binary key.
bytes row_key_regex_filter = 4;
// Matches all cells from a row with probability p, and matches no cells
// from the row with probability 1-p.
double row_sample_filter = 14;
// Matches only cells from columns whose families satisfy the given RE2
// regex. For technical reasons, the regex must not contain the ':'
// character, even if it is not being used as a literal.
// Note that, since column families cannot contain the new line character
// '\n', it is sufficient to use '.' as a full wildcard when matching
// column family names.
string family_name_regex_filter = 5;
// Matches only cells from columns whose qualifiers satisfy the given RE2
// regex.
// Note that, since column qualifiers can contain arbitrary bytes, the '\C'
// escape sequence must be used if a true wildcard is desired. The '.'
// character will not match the new line character '\n', which may be
// present in a binary qualifier.
bytes column_qualifier_regex_filter = 6;
// Matches only cells from columns within the given range.
ColumnRange column_range_filter = 7;
// Matches only cells with timestamps within the given range.
TimestampRange timestamp_range_filter = 8;
// Matches only cells with values that satisfy the given regular expression.
// Note that, since cell values can contain arbitrary bytes, the '\C' escape
// sequence must be used if a true wildcard is desired. The '.' character
// will not match the new line character '\n', which may be present in a
// binary value.
bytes value_regex_filter = 9;
// Matches only cells with values that fall within the given range.
ValueRange value_range_filter = 15;
// Skips the first N cells of each row, matching all subsequent cells.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_offset_filter = 10;
// Matches only the first N cells of each row.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_limit_filter = 11;
// Matches only the most recent N cells within each column. For example,
// if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
// skip all earlier cells in "foo:bar", and then begin matching again in
// column "foo:bar2".
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_column_limit_filter = 12;
// Replaces each cell's value with the empty string.
bool strip_value_transformer = 13;
// Applies the given label to all cells in the output row. This allows
// the client to determine which results were produced from which part of
// the filter.
//
// Values must be at most 15 characters in length, and match the RE2
// pattern [a-z0-9\\-]+
//
// Due to a technical limitation, it is not currently possible to apply
// multiple labels to a cell. As a result, a Chain may have no more than
// one sub-filter which contains an apply_label_transformer. It is okay for
// an Interleave to contain multiple apply_label_transformers, as they will
// be applied to separate copies of the input. This may be relaxed in the
// future.
string apply_label_transformer = 19;
}
}
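
A compact Python sketch (illustration only) of how Chain and Interleave compose when filters are modeled as functions from a list of cells to a list of cells; the pooling-and-sorting of interleaved output by column and timestamp described above is omitted here for brevity.

def chain(filters):
    def run(cells):
        for f in filters:              # in row -> f(0) -> ... -> f(N) -> out row
            cells = f(cells)
        return cells
    return run

def interleave(filters):
    def run(cells):
        out = []
        for f in filters:              # each filter sees its own copy of the row
            out.extend(f(list(cells)))
        return out                     # duplicate cells may appear, as documented
    return run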
// Specifies a particular change to be made to the contents of a row.
message Mutation {
// A Mutation which sets the value of the specified cell.
message SetCell {
// The name of the family into which new data should be written.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
// The qualifier of the column into which new data should be written.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The timestamp of the cell into which new data should be written.
// Use -1 for current Bigtable server time.
// Otherwise, the client should set this value itself, noting that the
// default value is a timestamp of zero if the field is left unspecified.
// Values must match the "granularity" of the table (e.g. micros, millis).
int64 timestamp_micros = 3;
// The value to be written into the specified cell.
bytes value = 4;
}
// A Mutation which deletes cells from the specified column, optionally
// restricting the deletions to a given timestamp range.
message DeleteFromColumn {
// The name of the family from which cells should be deleted.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
// The qualifier of the column from which cells should be deleted.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The range of timestamps within which cells should be deleted.
TimestampRange time_range = 3;
}
// A Mutation which deletes all cells from the specified column family.
message DeleteFromFamily {
// The name of the family from which cells should be deleted.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
}
// A Mutation which deletes all cells from the containing row.
message DeleteFromRow {
}
// Which of the possible Mutation types to apply.
oneof mutation {
// Set a cell's value.
SetCell set_cell = 1;
// Deletes cells from a column.
DeleteFromColumn delete_from_column = 2;
// Deletes cells from a column family.
DeleteFromFamily delete_from_family = 3;
// Deletes cells from the entire row.
DeleteFromRow delete_from_row = 4;
}
}
// Specifies an atomic read/modify/write operation on the latest value of the
// specified column.
message ReadModifyWriteRule {
// The name of the family to which the read/modify/write should be applied.
// Must match [-_.a-zA-Z0-9]+
string family_name = 1;
// The qualifier of the column to which the read/modify/write should be
// applied.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;
// The rule used to determine the column's new latest value from its current
// latest value.
oneof rule {
// Rule specifying that "append_value" be appended to the existing value.
// If the targeted cell is unset, it will be treated as containing the
// empty string.
bytes append_value = 3;
// Rule specifying that "increment_amount" be added to the existing value.
// If the targeted cell is unset, it will be treated as containing a zero.
// Otherwise, the targeted cell must contain an 8-byte value (interpreted
// as a 64-bit big-endian signed integer), or the entire request will fail.
int64 increment_amount = 4;
}
}
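
The increment rule above operates on an 8-byte, big-endian, signed 64-bit cell value. A short Python illustration (not part of the vendored proto) of that encoding:

import struct

def apply_increment(existing: bytes, increment_amount: int) -> bytes:
    # An unset cell is treated as zero; anything else must be exactly 8 bytes.
    current = struct.unpack(">q", existing)[0] if existing else 0
    return struct.pack(">q", current + increment_amount)

assert apply_increment(b"", 5) == struct.pack(">q", 5)
assert apply_increment(struct.pack(">q", 41), 1) == struct.pack(">q", 42)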

View file

@@ -0,0 +1,73 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v1;
import "google/api/annotations.proto";
import "google/bigtable/v1/bigtable_data.proto";
import "google/bigtable/v1/bigtable_service_messages.proto";
import "google/protobuf/empty.proto";
option java_generic_services = true;
option java_multiple_files = true;
option java_outer_classname = "BigtableServicesProto";
option java_package = "com.google.bigtable.v1";
// Service for reading from and writing to existing Bigtables.
service BigtableService {
// Streams back the contents of all requested rows, optionally applying
// the same Reader filter to each. Depending on their size, rows may be
// broken up across multiple responses, but atomicity of each row will still
// be preserved.
rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" body: "*" };
}
// Returns a sample of row keys in the table. The returned row keys will
// delimit contiguous sections of the table of approximately equal size,
// which can be used to break up the data for distributed tasks like
// mapreduces.
rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
option (google.api.http) = { get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" };
}
// Mutates a row atomically. Cells already present in the row are left
// unchanged unless explicitly changed by 'mutation'.
rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" };
}
// Mutates multiple rows in a batch. Each individual row is mutated
// atomically as in MutateRow, but the entire batch is not executed
// atomically.
rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" };
}
// Mutates a row atomically based on the output of a predicate Reader filter.
rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" };
}
// Modifies a row atomically, reading the latest existing timestamp/value from
// the specified columns and writing a new value at
// max(existing timestamp, current server time) based on pre-defined
// read/modify/write rules. Returns the new contents of all modified cells.
rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {
option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" body: "*" };
}
}
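
As the SampleRowKeys comment above suggests, the sampled keys can be turned into contiguous scan ranges for distributed workers. A minimal Python sketch (illustration only), where sample_keys is the ordered list of row keys from the streaming response and an empty key marks end-of-table:

def ranges_from_samples(sample_keys):
    keys = [k for k in sample_keys if k]      # drop the end-of-table marker, if any
    bounds = [b""] + keys + [b""]             # b"" means open start / open end
    return [(bounds[i], bounds[i + 1]) for i in range(len(bounds) - 1)]

# Example: [b"k2", b"k5", b""] -> [(b"", b"k2"), (b"k2", b"k5"), (b"k5", b"")]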

View file

@@ -0,0 +1,214 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v1;
import "google/bigtable/v1/bigtable_data.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableServiceMessagesProto";
option java_package = "com.google.bigtable.v1";
// Request message for BigtableService.ReadRows.
message ReadRowsRequest {
// The unique name of the table from which to read.
string table_name = 1;
// If neither row_key nor row_range is set, reads from all rows.
oneof target {
// The key of a single row from which to read.
bytes row_key = 2;
// A range of rows from which to read.
RowRange row_range = 3;
// A set of rows from which to read. Entries need not be in order, and will
// be deduplicated before reading.
// The total serialized size of the set must not exceed 1MB.
RowSet row_set = 8;
}
// The filter to apply to the contents of the specified row(s). If unset,
// reads the entire table.
RowFilter filter = 5;
// By default, rows are read sequentially, producing results which are
// guaranteed to arrive in increasing row order. Setting
// "allow_row_interleaving" to true allows multiple rows to be interleaved in
// the response stream, which increases throughput but breaks this guarantee,
// and may force the client to use more memory to buffer partially-received
// rows. Cannot be set to true when specifying "num_rows_limit".
bool allow_row_interleaving = 6;
// The read will terminate after committing to N rows' worth of results. The
// default (zero) is to return all results.
// Note that "allow_row_interleaving" cannot be set to true when this is set.
int64 num_rows_limit = 7;
}
// Response message for BigtableService.ReadRows.
message ReadRowsResponse {
// Specifies a piece of a row's contents returned as part of the read
// response stream.
message Chunk {
oneof chunk {
// A subset of the data from a particular row. As long as no "reset_row"
// is received in between, multiple "row_contents" from the same row are
// from the same atomic view of that row, and will be received in the
// expected family/column/timestamp order.
Family row_contents = 1;
// Indicates that the client should drop all previous chunks for
// "row_key", as it will be re-read from the beginning.
bool reset_row = 2;
// Indicates that the client can safely process all previous chunks for
// "row_key", as its data has been fully read.
bool commit_row = 3;
}
}
// The key of the row for which we're receiving data.
// Results will be received in increasing row key order, unless
// "allow_row_interleaving" was specified in the request.
bytes row_key = 1;
// One or more chunks of the row specified by "row_key".
repeated Chunk chunks = 2;
}
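
A small Python sketch (illustration only) of how a client buffers Chunks for one row according to the semantics above: reset_row discards buffered data, commit_row makes it safe to process, and row_contents accumulates families in between.

def assemble_row(chunks):
    families = []
    for chunk in chunks:
        if getattr(chunk, "reset_row", False):
            families = []            # the row will be re-read from the beginning
        elif getattr(chunk, "commit_row", False):
            return families          # all buffered chunks are now safe to process
        else:
            families.append(chunk.row_contents)
    return None                      # stream ended without a commit_row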
// Request message for BigtableService.SampleRowKeys.
message SampleRowKeysRequest {
// The unique name of the table from which to sample row keys.
string table_name = 1;
}
// Response message for BigtableService.SampleRowKeys.
message SampleRowKeysResponse {
// Sorted streamed sequence of sample row keys in the table. The table might
// have contents before the first row key in the list and after the last one,
// but a key containing the empty string indicates "end of table" and will be
// the last response given, if present.
// Note that row keys in this list may not have ever been written to or read
// from, and users should therefore not make any assumptions about the row key
// structure that are specific to their use case.
bytes row_key = 1;
// Approximate total storage space used by all rows in the table which precede
// "row_key". Buffering the contents of all rows between two subsequent
// samples would require space roughly equal to the difference in their
// "offset_bytes" fields.
int64 offset_bytes = 2;
}
// Request message for BigtableService.MutateRow.
message MutateRowRequest {
// The unique name of the table to which the mutation should be applied.
string table_name = 1;
// The key of the row to which the mutation should be applied.
bytes row_key = 2;
// Changes to be atomically applied to the specified row. Entries are applied
// in order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry and at most 100000.
repeated Mutation mutations = 3;
}
// Request message for BigtableService.MutateRows.
message MutateRowsRequest {
message Entry {
// The key of the row to which the `mutations` should be applied.
bytes row_key = 1;
// Changes to be atomically applied to the specified row. Mutations are
// applied in order, meaning that earlier mutations can be masked by
// later ones.
// At least one mutation must be specified.
repeated Mutation mutations = 2;
}
// The unique name of the table to which the mutations should be applied.
string table_name = 1;
// The row keys/mutations to be applied in bulk.
// Each entry is applied as an atomic mutation, but the entries may be
// applied in arbitrary order (even between entries for the same row).
// At least one entry must be specified, and in total the entries may
// contain at most 100000 mutations.
repeated Entry entries = 2;
}
// Response message for BigtableService.MutateRows.
message MutateRowsResponse {
// The results for each Entry from the request, presented in the order
// in which the entries were originally given.
repeated google.rpc.Status statuses = 1;
}
// Request message for BigtableService.CheckAndMutateRow.
message CheckAndMutateRowRequest {
// The unique name of the table to which the conditional mutation should be
// applied.
string table_name = 1;
// The key of the row to which the conditional mutation should be applied.
bytes row_key = 2;
// The filter to be applied to the contents of the specified row. Depending
// on whether or not any results are yielded, either "true_mutations" or
// "false_mutations" will be executed. If unset, checks that the row contains
// any values at all.
RowFilter predicate_filter = 6;
// Changes to be atomically applied to the specified row if "predicate_filter"
// yields at least one cell when applied to "row_key". Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if "false_mutations" is empty, and at most
// 100000.
repeated Mutation true_mutations = 4;
// Changes to be atomically applied to the specified row if "predicate_filter"
// does not yield any cells when applied to "row_key". Entries are applied in
// order, meaning that earlier mutations can be masked by later ones.
// Must contain at least one entry if "true_mutations" is empty, and at most
// 100000.
repeated Mutation false_mutations = 5;
}
// Response message for BigtableService.CheckAndMutateRow.
message CheckAndMutateRowResponse {
// Whether or not the request's "predicate_filter" yielded any results for
// the specified row.
bool predicate_matched = 1;
}
// Request message for BigtableService.ReadModifyWriteRow.
message ReadModifyWriteRowRequest {
// The unique name of the table to which the read/modify/write rules should be
// applied.
string table_name = 1;
// The key of the row to which the read/modify/write rules should be applied.
bytes row_key = 2;
// Rules specifying how the specified row's contents are to be transformed
// into writes. Entries are applied in order, meaning that earlier rules will
// affect the results of later ones.
repeated ReadModifyWriteRule rules = 3;
}

View file

@@ -0,0 +1,125 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.table.v1;
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableDataProto";
option java_package = "com.google.bigtable.admin.table.v1";
// A collection of user data indexed by row, column, and timestamp.
// Each table is served using the resources of its parent cluster.
message Table {
enum TimestampGranularity {
MILLIS = 0;
}
// A unique identifier of the form
// <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
string name = 1;
// If this Table is in the process of being created, the Operation used to
// track its progress. As long as this operation is present, the Table will
// not accept any Table Admin or Read/Write requests.
google.longrunning.Operation current_operation = 2;
// The column families configured for this table, mapped by column family id.
map<string, ColumnFamily> column_families = 3;
// The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
// this table. Timestamps not matching the granularity will be rejected.
// Cannot be changed once the table is created.
TimestampGranularity granularity = 4;
}
// A set of columns within a table which share a common configuration.
message ColumnFamily {
// A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+
// The last segment is the same as the "name" field in
// google.bigtable.v1.Family.
string name = 1;
// Garbage collection expression specified by the following grammar:
// GC = EXPR
// | "" ;
// EXPR = EXPR, "||", EXPR (* lowest precedence *)
// | EXPR, "&&", EXPR
// | "(", EXPR, ")" (* highest precedence *)
// | PROP ;
// PROP = "version() >", NUM32
// | "age() >", NUM64, [ UNIT ] ;
// NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *)
// NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *)
// UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *)
// GC expressions can be up to 500 characters in length
//
// The different types of PROP are defined as follows:
// version() - cell index, counting from most recent and starting at 1
// age() - age of the cell (current time minus cell timestamp)
//
// Example: "version() > 3 || (age() > 3d && version() > 1)"
// drop cells beyond the most recent three, and drop cells older than three
// days unless they're the most recent cell in the row/column
//
// Garbage collection executes opportunistically in the background, and so
// it's possible for reads to return a cell even if it matches the active GC
// expression for its family.
string gc_expression = 2;
// Garbage collection rule specified as a protobuf.
// Supersedes `gc_expression`.
// Must serialize to at most 500 bytes.
//
// NOTE: Garbage collection executes opportunistically in the background, and
// so it's possible for reads to return a cell even if it matches the active
// GC expression for its family.
GcRule gc_rule = 3;
}
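
A plain-Python reading (illustration only) of the example expression above, where version() counts cells from most recent starting at 1 and age() is current time minus the cell timestamp:

DAY_MICROS = 24 * 3600 * 1_000_000

def is_dropped(version: int, age_micros: int) -> bool:
    # "version() > 3 || (age() > 3d && version() > 1)"
    return version > 3 or (age_micros > 3 * DAY_MICROS and version > 1)

assert not is_dropped(version=1, age_micros=10 * DAY_MICROS)  # newest cell is kept
assert is_dropped(version=2, age_micros=10 * DAY_MICROS)      # old and not the newest
assert is_dropped(version=4, age_micros=0)                    # beyond the newest three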
// Rule for determining which cells to delete during garbage collection.
message GcRule {
// A GcRule which deletes cells matching all of the given rules.
message Intersection {
// Only delete cells which would be deleted by every element of `rules`.
repeated GcRule rules = 1;
}
// A GcRule which deletes cells matching any of the given rules.
message Union {
// Delete cells which would be deleted by any element of `rules`.
repeated GcRule rules = 1;
}
oneof rule {
// Delete all cells in a column except the most recent N.
int32 max_num_versions = 1;
// Delete cells in a column older than the given age.
// Values must be at least one millisecond, and will be truncated to
// microsecond granularity.
google.protobuf.Duration max_age = 2;
// Delete cells that would be deleted by every nested rule.
Intersection intersection = 3;
// Delete cells that would be deleted by any nested rule.
Union union = 4;
}
}

View file

@@ -0,0 +1,74 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.table.v1;
import "google/api/annotations.proto";
import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto";
import "google/protobuf/empty.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableServicesProto";
option java_package = "com.google.bigtable.admin.table.v1";
// Service for creating, configuring, and deleting Cloud Bigtable tables.
// Provides access to the table schemas only, not the data stored within the tables.
service BigtableTableService {
// Creates a new table, to be served from a specified cluster.
// The table can be created with a full set of initial column families,
// specified in the request.
rpc CreateTable(CreateTableRequest) returns (Table) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" };
}
// Lists the names of all tables served from a specified cluster.
rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" };
}
// Gets the schema of the specified table, including its column families.
rpc GetTable(GetTableRequest) returns (Table) {
option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
}
// Permanently deletes a specified table and all of its data.
rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
}
// Changes the name of a specified table.
// Cannot be used to move tables between clusters, zones, or projects.
rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" };
}
// Creates a new column family within a specified table.
rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) {
option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" };
}
// Changes the configuration of a specified column family.
rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) {
option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" };
}
// Permanently deletes a specified column family and all of its data.
rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" };
}
}

View file

@@ -0,0 +1,101 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.admin.table.v1;
import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableServiceMessagesProto";
option java_package = "com.google.bigtable.admin.table.v1";
message CreateTableRequest {
// The unique name of the cluster in which to create the new table.
string name = 1;
// The name by which the new table should be referred to within the cluster,
// e.g. "foobar" rather than "<cluster_name>/tables/foobar".
string table_id = 2;
// The Table to create. The `name` field of the Table and all of its
// ColumnFamilies must be left blank, and will be populated in the response.
Table table = 3;
// The optional list of row keys that will be used to initially split the
// table into several tablets (Tablets are similar to HBase regions).
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
//
// Example:
// * Row keys := ["a", "apple", "custom", "customer_1", "customer_2",
// "other", "zz"]
// * initial_split_keys := ["apple", "customer_1", "customer_2", "other"]
// * Key assignment:
// - Tablet 1 [, apple) => {"a"}.
// - Tablet 2 [apple, customer_1) => {"apple", "custom"}.
// - Tablet 3 [customer_1, customer_2) => {"customer_1"}.
// - Tablet 4 [customer_2, other) => {"customer_2"}.
// - Tablet 5 [other, ) => {"other", "zz"}.
repeated string initial_split_keys = 4;
}
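
The tablet assignment in the example above can be reproduced with a binary search over the split keys; a short Python sketch (illustration only), where each split key is the inclusive lower bound of the next tablet:

import bisect

def tablet_index(split_keys, row_key):
    # Tablet 0 spans [, split_keys[0]); the last tablet spans [split_keys[-1], ).
    return bisect.bisect_right(split_keys, row_key)

splits = ["apple", "customer_1", "customer_2", "other"]
assert tablet_index(splits, "a") == 0            # Tablet 1 [, apple)
assert tablet_index(splits, "custom") == 1       # Tablet 2 [apple, customer_1)
assert tablet_index(splits, "customer_2") == 3   # Tablet 4 [customer_2, other)
assert tablet_index(splits, "zz") == 4           # Tablet 5 [other, )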
message ListTablesRequest {
// The unique name of the cluster for which tables should be listed.
string name = 1;
}
message ListTablesResponse {
// The tables present in the requested cluster.
// At present, only the names of the tables are populated.
repeated Table tables = 1;
}
message GetTableRequest {
// The unique name of the requested table.
string name = 1;
}
message DeleteTableRequest {
// The unique name of the table to be deleted.
string name = 1;
}
message RenameTableRequest {
// The current unique name of the table.
string name = 1;
// The new name by which the table should be referred to within its containing
// cluster, e.g. "foobar" rather than "<cluster_name>/tables/foobar".
string new_id = 2;
}
message CreateColumnFamilyRequest {
// The unique name of the table in which to create the new column family.
string name = 1;
// The name by which the new column family should be referred to within the
// table, e.g. "foobar" rather than "<table_name>/columnFamilies/foobar".
string column_family_id = 2;
// The column family to create. The `name` field must be left blank.
ColumnFamily column_family = 3;
}
message DeleteColumnFamilyRequest {
// The unique name of the column family to be deleted.
string name = 1;
}

View file

@@ -0,0 +1,144 @@
// Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.longrunning;
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "OperationsProto";
option java_package = "com.google.longrunning";
// Manages long-running operations with an API service.
//
// When an API method normally takes a long time to complete, it can be
// designed to return [Operation][google.longrunning.Operation] to the client, and the client can use
// this interface to receive the real response asynchronously by polling the
// operation resource, by using the `google.watcher.v1.Watcher` interface to
// watch the response, or by passing the operation resource to another API
// (such as the Google Cloud Pub/Sub API) to receive the response. Any API
// service that returns long-running operations should implement the
// `Operations` interface so developers can have a consistent client experience.
service Operations {
// Gets the latest state of a long-running operation. Clients may use this
// method to poll the operation result at intervals as recommended by the API
// service.
rpc GetOperation(GetOperationRequest) returns (Operation) {
option (google.api.http) = { get: "/v1/{name=operations/**}" };
}
// Lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
option (google.api.http) = { get: "/v1/{name=operations}" };
}
// Starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients may use
// [Operations.GetOperation] or other methods to check whether the
// cancellation succeeded or the operation completed despite cancellation.
rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
}
// Deletes a long-running operation. It indicates the client is no longer
// interested in the operation result. It does not cancel the operation.
rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=operations/**}" };
}
}
// This resource represents a long-running operation that is the result of a
// network API call.
message Operation {
// The name of the operation resource, which is only unique within the same
// service that originally returns it.
string name = 1;
// Some service-specific metadata associated with the operation. It typically
// contains progress information and common metadata such as create time.
// Some services may not provide such metadata. Any method that returns a
// long-running operation should document the metadata type, if any.
google.protobuf.Any metadata = 2;
// If the value is false, it means the operation is still in progress.
// If true, the operation is completed and the `result` is available.
bool done = 3;
oneof result {
// The error result of the operation in case of failure.
google.rpc.Status error = 4;
// The normal response of the operation in case of success. If the original
// method returns no data on success, such as `Delete`, the response will be
// `google.protobuf.Empty`. If the original method is standard
// `Get`/`Create`/`Update`, the response should be the resource. For other
// methods, the response should have the type `XxxResponse`, where `Xxx`
// is the original method name. For example, if the original method name
// is `TakeSnapshot()`, the inferred response type will be
// `TakeSnapshotResponse`.
google.protobuf.Any response = 5;
}
}
// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
message GetOperationRequest {
// The name of the operation resource.
string name = 1;
}
// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsRequest {
// The name of the operation collection.
string name = 4;
// The standard List filter.
string filter = 1;
// The standard List page size.
int32 page_size = 2;
// The standard List page token.
string page_token = 3;
}
// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsResponse {
// A list of operations that match the specified filter in the request.
repeated Operation operations = 1;
// The standard List next-page token.
string next_page_token = 2;
}
// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
message CancelOperationRequest {
// The name of the operation resource to be cancelled.
string name = 1;
}
// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
message DeleteOperationRequest {
// The name of the operation resource to be deleted.
string name = 1;
}
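
The polling pattern described in the `Operations` service comment above looks roughly like the following in Python; the `operations_stub` argument and the poll interval are placeholders, not something defined in this file.

# Hedged sketch of polling a long-running operation; `operations_stub` is a
# placeholder for whatever client stub the calling service exposes.
import time
from google.longrunning import operations_pb2

def wait_for_operation(operations_stub, name, poll_seconds=2, timeout=30):
    request = operations_pb2.GetOperationRequest(name=name)
    operation = operations_stub.GetOperation(request, timeout)
    while not operation.done:
        time.sleep(poll_seconds)
        operation = operations_stub.GetOperation(request, timeout)
    if operation.HasField('error'):
        raise RuntimeError(operation.error.message)
    # On success the payload is a packed google.protobuf.Any.
    return operation.response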

View file

@@ -0,0 +1,221 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto',
package='google.bigtable.admin.cluster.v1',
syntax='proto3',
serialized_pb=b'\n<google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto\x12 google.bigtable.admin.cluster.v1\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\x01\n\x04Zone\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12=\n\x06status\x18\x03 \x01(\x0e\x32-.google.bigtable.admin.cluster.v1.Zone.Status\"P\n\x06Status\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02OK\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x18\n\x14\x45MERGENCY_MAINENANCE\x10\x03\"\xc9\x01\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x11\x63urrent_operation\x18\x03 \x01(\x0b\x32\x1d.google.longrunning.Operation\x12\x14\n\x0c\x64isplay_name\x18\x04 \x01(\t\x12\x13\n\x0bserve_nodes\x18\x05 \x01(\x05\x12K\n\x14\x64\x65\x66\x61ult_storage_type\x18\x08 \x01(\x0e\x32-.google.bigtable.admin.cluster.v1.StorageType*H\n\x0bStorageType\x12\x17\n\x13STORAGE_UNSPECIFIED\x10\x00\x12\x0f\n\x0bSTORAGE_SSD\x10\x01\x12\x0f\n\x0bSTORAGE_HDD\x10\x02\x42\x42\n$com.google.bigtable.admin.cluster.v1B\x18\x42igtableClusterDataProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STORAGETYPE = _descriptor.EnumDescriptor(
name='StorageType',
full_name='google.bigtable.admin.cluster.v1.StorageType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STORAGE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STORAGE_SSD', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STORAGE_HDD', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=592,
serialized_end=664,
)
_sym_db.RegisterEnumDescriptor(_STORAGETYPE)
StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE)
STORAGE_UNSPECIFIED = 0
STORAGE_SSD = 1
STORAGE_HDD = 2
_ZONE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='google.bigtable.admin.cluster.v1.Zone.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PLANNED_MAINTENANCE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMERGENCY_MAINENANCE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=306,
serialized_end=386,
)
_sym_db.RegisterEnumDescriptor(_ZONE_STATUS)
_ZONE = _descriptor.Descriptor(
name='Zone',
full_name='google.bigtable.admin.cluster.v1.Zone',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.Zone.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.bigtable.admin.cluster.v1.Zone.display_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='google.bigtable.admin.cluster.v1.Zone.status', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ZONE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=199,
serialized_end=386,
)
_CLUSTER = _descriptor.Descriptor(
name='Cluster',
full_name='google.bigtable.admin.cluster.v1.Cluster',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.Cluster.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_operation', full_name='google.bigtable.admin.cluster.v1.Cluster.current_operation', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_name', full_name='google.bigtable.admin.cluster.v1.Cluster.display_name', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serve_nodes', full_name='google.bigtable.admin.cluster.v1.Cluster.serve_nodes', index=3,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default_storage_type', full_name='google.bigtable.admin.cluster.v1.Cluster.default_storage_type', index=4,
number=8, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=389,
serialized_end=590,
)
_ZONE.fields_by_name['status'].enum_type = _ZONE_STATUS
_ZONE_STATUS.containing_type = _ZONE
_CLUSTER.fields_by_name['current_operation'].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION
_CLUSTER.fields_by_name['default_storage_type'].enum_type = _STORAGETYPE
DESCRIPTOR.message_types_by_name['Zone'] = _ZONE
DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER
DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE
Zone = _reflection.GeneratedProtocolMessageType('Zone', (_message.Message,), dict(
DESCRIPTOR = _ZONE,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.Zone)
))
_sym_db.RegisterMessage(Zone)
Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict(
DESCRIPTOR = _CLUSTER,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.Cluster)
))
_sym_db.RegisterMessage(Cluster)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n$com.google.bigtable.admin.cluster.v1B\030BigtableClusterDataProtoP\001')
# @@protoc_insertion_point(module_scope)
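
As a quick, hedged illustration of what the registrations above produce, the generated Zone and Cluster classes behave like ordinary protobuf messages; the resource names below are made up.

# Illustrative usage of the generated classes registered above.
from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2

cluster = data_pb2.Cluster(
    name='projects/my-project/zones/my-zone/clusters/my-cluster',
    display_name='my-cluster',
    serve_nodes=3,
    default_storage_type=data_pb2.STORAGE_SSD,
)
# Generated messages round-trip through the standard protobuf wire format.
copy = data_pb2.Cluster.FromString(cluster.SerializeToString())
assert copy.serve_nodes == 3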

View file

@@ -0,0 +1,538 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto',
package='google.bigtable.admin.cluster.v1',
syntax='proto3',
serialized_pb=b'\nHgoogle/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto\x12 google.bigtable.admin.cluster.v1\x1a<google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto\x1a\x1fgoogle/protobuf/timestamp.proto\" \n\x10ListZonesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"J\n\x11ListZonesResponse\x12\x35\n\x05zones\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.cluster.v1.Zone\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"#\n\x13ListClustersRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x91\x01\n\x14ListClustersResponse\x12;\n\x08\x63lusters\x18\x01 \x03(\x0b\x32).google.bigtable.admin.cluster.v1.Cluster\x12<\n\x0c\x66\x61iled_zones\x18\x02 \x03(\x0b\x32&.google.bigtable.admin.cluster.v1.Zone\"t\n\x14\x43reateClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12:\n\x07\x63luster\x18\x03 \x01(\x0b\x32).google.bigtable.admin.cluster.v1.Cluster\"\xcc\x01\n\x15\x43reateClusterMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.cluster.v1.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xf0\x01\n\x15UpdateClusterMetadata\x12\x43\n\x10original_request\x18\x01 \x01(\x0b\x32).google.bigtable.admin.cluster.v1.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x63\x61ncel_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16UndeleteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"|\n\x17UndeleteClusterMetadata\x12\x30\n\x0crequest_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampBM\n$com.google.bigtable.admin.cluster.v1B#BigtableClusterServiceMessagesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LISTZONESREQUEST = _descriptor.Descriptor(
name='ListZonesRequest',
full_name='google.bigtable.admin.cluster.v1.ListZonesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.ListZonesRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=205,
serialized_end=237,
)
_LISTZONESRESPONSE = _descriptor.Descriptor(
name='ListZonesResponse',
full_name='google.bigtable.admin.cluster.v1.ListZonesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='zones', full_name='google.bigtable.admin.cluster.v1.ListZonesResponse.zones', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=313,
)
_GETCLUSTERREQUEST = _descriptor.Descriptor(
name='GetClusterRequest',
full_name='google.bigtable.admin.cluster.v1.GetClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.GetClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=315,
serialized_end=348,
)
_LISTCLUSTERSREQUEST = _descriptor.Descriptor(
name='ListClustersRequest',
full_name='google.bigtable.admin.cluster.v1.ListClustersRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.ListClustersRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=350,
serialized_end=385,
)
_LISTCLUSTERSRESPONSE = _descriptor.Descriptor(
name='ListClustersResponse',
full_name='google.bigtable.admin.cluster.v1.ListClustersResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='clusters', full_name='google.bigtable.admin.cluster.v1.ListClustersResponse.clusters', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='failed_zones', full_name='google.bigtable.admin.cluster.v1.ListClustersResponse.failed_zones', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=388,
serialized_end=533,
)
_CREATECLUSTERREQUEST = _descriptor.Descriptor(
name='CreateClusterRequest',
full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cluster_id', full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest.cluster_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cluster', full_name='google.bigtable.admin.cluster.v1.CreateClusterRequest.cluster', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=651,
)
_CREATECLUSTERMETADATA = _descriptor.Descriptor(
name='CreateClusterMetadata',
full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_request', full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata.original_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='request_time', full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata.request_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time', full_name='google.bigtable.admin.cluster.v1.CreateClusterMetadata.finish_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=654,
serialized_end=858,
)
_UPDATECLUSTERMETADATA = _descriptor.Descriptor(
name='UpdateClusterMetadata',
full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_request', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.original_request', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='request_time', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.request_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cancel_time', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.cancel_time', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time', full_name='google.bigtable.admin.cluster.v1.UpdateClusterMetadata.finish_time', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=861,
serialized_end=1101,
)
_DELETECLUSTERREQUEST = _descriptor.Descriptor(
name='DeleteClusterRequest',
full_name='google.bigtable.admin.cluster.v1.DeleteClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.DeleteClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1103,
serialized_end=1139,
)
_UNDELETECLUSTERREQUEST = _descriptor.Descriptor(
name='UndeleteClusterRequest',
full_name='google.bigtable.admin.cluster.v1.UndeleteClusterRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.cluster.v1.UndeleteClusterRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1141,
serialized_end=1179,
)
_UNDELETECLUSTERMETADATA = _descriptor.Descriptor(
name='UndeleteClusterMetadata',
full_name='google.bigtable.admin.cluster.v1.UndeleteClusterMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_time', full_name='google.bigtable.admin.cluster.v1.UndeleteClusterMetadata.request_time', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finish_time', full_name='google.bigtable.admin.cluster.v1.UndeleteClusterMetadata.finish_time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1181,
serialized_end=1305,
)
_LISTZONESRESPONSE.fields_by_name['zones'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._ZONE
_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._CLUSTER
_LISTCLUSTERSRESPONSE.fields_by_name['failed_zones'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._ZONE
_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._CLUSTER
_CREATECLUSTERMETADATA.fields_by_name['original_request'].message_type = _CREATECLUSTERREQUEST
_CREATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_CREATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2._CLUSTER
_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATECLUSTERMETADATA.fields_by_name['cancel_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UNDELETECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_UNDELETECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
DESCRIPTOR.message_types_by_name['ListZonesRequest'] = _LISTZONESREQUEST
DESCRIPTOR.message_types_by_name['ListZonesResponse'] = _LISTZONESRESPONSE
DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST
DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST
DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE
DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['CreateClusterMetadata'] = _CREATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA
DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UndeleteClusterRequest'] = _UNDELETECLUSTERREQUEST
DESCRIPTOR.message_types_by_name['UndeleteClusterMetadata'] = _UNDELETECLUSTERMETADATA
ListZonesRequest = _reflection.GeneratedProtocolMessageType('ListZonesRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTZONESREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListZonesRequest)
))
_sym_db.RegisterMessage(ListZonesRequest)
ListZonesResponse = _reflection.GeneratedProtocolMessageType('ListZonesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTZONESRESPONSE,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListZonesResponse)
))
_sym_db.RegisterMessage(ListZonesResponse)
GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.GetClusterRequest)
))
_sym_db.RegisterMessage(GetClusterRequest)
ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListClustersRequest)
))
_sym_db.RegisterMessage(ListClustersRequest)
ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTCLUSTERSRESPONSE,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.ListClustersResponse)
))
_sym_db.RegisterMessage(ListClustersResponse)
CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATECLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.CreateClusterRequest)
))
_sym_db.RegisterMessage(CreateClusterRequest)
CreateClusterMetadata = _reflection.GeneratedProtocolMessageType('CreateClusterMetadata', (_message.Message,), dict(
DESCRIPTOR = _CREATECLUSTERMETADATA,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.CreateClusterMetadata)
))
_sym_db.RegisterMessage(CreateClusterMetadata)
UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict(
DESCRIPTOR = _UPDATECLUSTERMETADATA,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.UpdateClusterMetadata)
))
_sym_db.RegisterMessage(UpdateClusterMetadata)
DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETECLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.DeleteClusterRequest)
))
_sym_db.RegisterMessage(DeleteClusterRequest)
UndeleteClusterRequest = _reflection.GeneratedProtocolMessageType('UndeleteClusterRequest', (_message.Message,), dict(
DESCRIPTOR = _UNDELETECLUSTERREQUEST,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.UndeleteClusterRequest)
))
_sym_db.RegisterMessage(UndeleteClusterRequest)
UndeleteClusterMetadata = _reflection.GeneratedProtocolMessageType('UndeleteClusterMetadata', (_message.Message,), dict(
DESCRIPTOR = _UNDELETECLUSTERMETADATA,
__module__ = 'google.bigtable.admin.cluster.v1.bigtable_cluster_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.cluster.v1.UndeleteClusterMetadata)
))
_sym_db.RegisterMessage(UndeleteClusterMetadata)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n$com.google.bigtable.admin.cluster.v1B#BigtableClusterServiceMessagesProtoP\001')
# @@protoc_insertion_point(module_scope)
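
Similarly, a hedged sketch of composing the cluster-admin request messages registered above; the project and zone names are illustrative.

# Illustrative composition of the request messages defined above.
from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
    bigtable_cluster_service_messages_pb2 as messages_pb2)

create_request = messages_pb2.CreateClusterRequest(
    name='projects/my-project/zones/my-zone',
    cluster_id='my-cluster',
    cluster=data_pb2.Cluster(display_name='my-cluster', serve_nodes=3),
)
list_request = messages_pb2.ListClustersRequest(name='projects/my-project')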

View file

@@ -0,0 +1,187 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated import bigtable_cluster_data_pb2 as google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2
from gcloud.bigtable._generated import bigtable_cluster_service_messages_pb2 as google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__service__messages__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto',
package='google.bigtable.admin.cluster.v1',
syntax='proto3',
serialized_pb=b'\n?google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto\x12 google.bigtable.admin.cluster.v1\x1a\x1cgoogle/api/annotations.proto\x1a<google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto\x1aHgoogle/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto2\x8f\t\n\x16\x42igtableClusterService\x12\x99\x01\n\tListZones\x12\x32.google.bigtable.admin.cluster.v1.ListZonesRequest\x1a\x33.google.bigtable.admin.cluster.v1.ListZonesResponse\"#\x82\xd3\xe4\x93\x02\x1d\x12\x1b/v1/{name=projects/*}/zones\x12\x9e\x01\n\nGetCluster\x12\x33.google.bigtable.admin.cluster.v1.GetClusterRequest\x1a).google.bigtable.admin.cluster.v1.Cluster\"0\x82\xd3\xe4\x93\x02*\x12(/v1/{name=projects/*/zones/*/clusters/*}\x12\xb0\x01\n\x0cListClusters\x12\x35.google.bigtable.admin.cluster.v1.ListClustersRequest\x1a\x36.google.bigtable.admin.cluster.v1.ListClustersResponse\"1\x82\xd3\xe4\x93\x02+\x12)/v1/{name=projects/*}/aggregated/clusters\x12\xa5\x01\n\rCreateCluster\x12\x36.google.bigtable.admin.cluster.v1.CreateClusterRequest\x1a).google.bigtable.admin.cluster.v1.Cluster\"1\x82\xd3\xe4\x93\x02+\"&/v1/{name=projects/*/zones/*}/clusters:\x01*\x12\x9a\x01\n\rUpdateCluster\x12).google.bigtable.admin.cluster.v1.Cluster\x1a).google.bigtable.admin.cluster.v1.Cluster\"3\x82\xd3\xe4\x93\x02-\x1a(/v1/{name=projects/*/zones/*/clusters/*}:\x01*\x12\x91\x01\n\rDeleteCluster\x12\x36.google.bigtable.admin.cluster.v1.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"0\x82\xd3\xe4\x93\x02**(/v1/{name=projects/*/zones/*/clusters/*}\x12\xab\x01\n\x0fUndeleteCluster\x12\x38.google.bigtable.admin.cluster.v1.UndeleteClusterRequest\x1a\x1d.google.longrunning.Operation\"?\x82\xd3\xe4\x93\x02\x39\"1/v1/{name=projects/*/zones/*/clusters/*}:undelete:\x04nullBF\n$com.google.bigtable.admin.cluster.v1B\x1c\x42igtableClusterServicesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_cluster_dot_v1_dot_bigtable__cluster__service__messages__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n$com.google.bigtable.admin.cluster.v1B\034BigtableClusterServicesProtoP\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaBigtableClusterServiceServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ListZones(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def GetCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ListClusters(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CreateCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def UpdateCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteCluster(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def UndeleteCluster(self, request, context):
raise NotImplementedError()
class BetaBigtableClusterServiceStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ListZones(self, request, timeout):
raise NotImplementedError()
ListZones.future = None
@abc.abstractmethod
def GetCluster(self, request, timeout):
raise NotImplementedError()
GetCluster.future = None
@abc.abstractmethod
def ListClusters(self, request, timeout):
raise NotImplementedError()
ListClusters.future = None
@abc.abstractmethod
def CreateCluster(self, request, timeout):
raise NotImplementedError()
CreateCluster.future = None
@abc.abstractmethod
def UpdateCluster(self, request, timeout):
raise NotImplementedError()
UpdateCluster.future = None
@abc.abstractmethod
def DeleteCluster(self, request, timeout):
raise NotImplementedError()
DeleteCluster.future = None
@abc.abstractmethod
def UndeleteCluster(self, request, timeout):
raise NotImplementedError()
UndeleteCluster.future = None
def beta_create_BigtableClusterService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.protobuf.empty_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.longrunning.operations_pb2
request_deserializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
}
response_serializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
}
method_implementations = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): face_utilities.unary_unary_inline(servicer.ListZones),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): face_utilities.unary_unary_inline(servicer.UndeleteCluster),
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_BigtableClusterService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_data_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.protobuf.empty_pb2
import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2
import google.longrunning.operations_pb2
request_serializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.SerializeToString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString,
}
response_deserializers = {
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.FromString,
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString,
}
cardinalities = {
'CreateCluster': cardinality.Cardinality.UNARY_UNARY,
'DeleteCluster': cardinality.Cardinality.UNARY_UNARY,
'GetCluster': cardinality.Cardinality.UNARY_UNARY,
'ListClusters': cardinality.Cardinality.UNARY_UNARY,
'ListZones': cardinality.Cardinality.UNARY_UNARY,
'UndeleteCluster': cardinality.Cardinality.UNARY_UNARY,
'UpdateCluster': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.cluster.v1.BigtableClusterService', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
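
Finally, a hedged sketch of wiring the beta stub factory above to a channel; the host, port, timeout, and the module path for this file are assumptions for illustration only.

# Illustrative use of the beta stub factory defined above.
from grpc.beta import implementations as beta_implementations
from gcloud.bigtable._generated import (
    bigtable_cluster_service_pb2 as cluster_service_pb2)
from gcloud.bigtable._generated import (
    bigtable_cluster_service_messages_pb2 as messages_pb2)

channel = beta_implementations.insecure_channel(
    'bigtableclusteradmin.googleapis.com', 443)
# A real deployment would use a secure channel with credentials instead.
stub = cluster_service_pb2.beta_create_BigtableClusterService_stub(channel)
response = stub.ListZones(
    messages_pb2.ListZonesRequest(name='projects/my-project'), 10)
zone_names = [zone.display_name for zone in response.zones]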

File diff suppressed because it is too large

View file

@@ -0,0 +1,678 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/v1/bigtable_service_messages.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/v1/bigtable_service_messages.proto',
package='google.bigtable.v1',
syntax='proto3',
serialized_pb=b'\n2google/bigtable/v1/bigtable_service_messages.proto\x12\x12google.bigtable.v1\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x17google/rpc/status.proto\"\x8b\x02\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x11\n\x07row_key\x18\x02 \x01(\x0cH\x00\x12\x31\n\trow_range\x18\x03 \x01(\x0b\x32\x1c.google.bigtable.v1.RowRangeH\x00\x12-\n\x07row_set\x18\x08 \x01(\x0b\x32\x1a.google.bigtable.v1.RowSetH\x00\x12-\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x1e\n\x16\x61llow_row_interleaving\x18\x06 \x01(\x08\x12\x16\n\x0enum_rows_limit\x18\x07 \x01(\x03\x42\x08\n\x06target\"\xd0\x01\n\x10ReadRowsResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12:\n\x06\x63hunks\x18\x02 \x03(\x0b\x32*.google.bigtable.v1.ReadRowsResponse.Chunk\x1ao\n\x05\x43hunk\x12\x32\n\x0crow_contents\x18\x01 \x01(\x0b\x32\x1a.google.bigtable.v1.FamilyH\x00\x12\x13\n\treset_row\x18\x02 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\x03 \x01(\x08H\x00\x42\x07\n\x05\x63hunk\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v1.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\":\n\x12MutateRowsResponse\x12$\n\x08statuses\x18\x01 \x03(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v1.ReadModifyWriteRuleB8\n\x16\x63om.google.bigtable.v1B\x1c\x42igtableServiceMessagesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_READROWSREQUEST = _descriptor.Descriptor(
name='ReadRowsRequest',
full_name='google.bigtable.v1.ReadRowsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.ReadRowsRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.ReadRowsRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_range', full_name='google.bigtable.v1.ReadRowsRequest.row_range', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_set', full_name='google.bigtable.v1.ReadRowsRequest.row_set', index=3,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='filter', full_name='google.bigtable.v1.ReadRowsRequest.filter', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_row_interleaving', full_name='google.bigtable.v1.ReadRowsRequest.allow_row_interleaving', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_rows_limit', full_name='google.bigtable.v1.ReadRowsRequest.num_rows_limit', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='target', full_name='google.bigtable.v1.ReadRowsRequest.target',
index=0, containing_type=None, fields=[]),
],
serialized_start=140,
serialized_end=407,
)
_READROWSRESPONSE_CHUNK = _descriptor.Descriptor(
name='Chunk',
full_name='google.bigtable.v1.ReadRowsResponse.Chunk',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_contents', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.row_contents', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reset_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.reset_row', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='commit_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.commit_row', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='chunk', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.chunk',
index=0, containing_type=None, fields=[]),
],
serialized_start=507,
serialized_end=618,
)
_READROWSRESPONSE = _descriptor.Descriptor(
name='ReadRowsResponse',
full_name='google.bigtable.v1.ReadRowsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.ReadRowsResponse.row_key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chunks', full_name='google.bigtable.v1.ReadRowsResponse.chunks', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_READROWSRESPONSE_CHUNK, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=410,
serialized_end=618,
)
_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor(
name='SampleRowKeysRequest',
full_name='google.bigtable.v1.SampleRowKeysRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.SampleRowKeysRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=620,
serialized_end=662,
)
_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor(
name='SampleRowKeysResponse',
full_name='google.bigtable.v1.SampleRowKeysResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.SampleRowKeysResponse.row_key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='offset_bytes', full_name='google.bigtable.v1.SampleRowKeysResponse.offset_bytes', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=664,
serialized_end=726,
)
_MUTATEROWREQUEST = _descriptor.Descriptor(
name='MutateRowRequest',
full_name='google.bigtable.v1.MutateRowRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.MutateRowRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.MutateRowRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mutations', full_name='google.bigtable.v1.MutateRowRequest.mutations', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=728,
serialized_end=832,
)
_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor(
name='Entry',
full_name='google.bigtable.v1.MutateRowsRequest.Entry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.MutateRowsRequest.Entry.row_key', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mutations', full_name='google.bigtable.v1.MutateRowsRequest.Entry.mutations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=938,
serialized_end=1011,
)
_MUTATEROWSREQUEST = _descriptor.Descriptor(
name='MutateRowsRequest',
full_name='google.bigtable.v1.MutateRowsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.MutateRowsRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entries', full_name='google.bigtable.v1.MutateRowsRequest.entries', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_MUTATEROWSREQUEST_ENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=835,
serialized_end=1011,
)
_MUTATEROWSRESPONSE = _descriptor.Descriptor(
name='MutateRowsResponse',
full_name='google.bigtable.v1.MutateRowsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='statuses', full_name='google.bigtable.v1.MutateRowsResponse.statuses', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1013,
serialized_end=1071,
)
_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor(
name='CheckAndMutateRowRequest',
full_name='google.bigtable.v1.CheckAndMutateRowRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.CheckAndMutateRowRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.CheckAndMutateRowRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predicate_filter', full_name='google.bigtable.v1.CheckAndMutateRowRequest.predicate_filter', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='true_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.true_mutations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='false_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.false_mutations', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1074,
serialized_end=1303,
)
_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor(
name='CheckAndMutateRowResponse',
full_name='google.bigtable.v1.CheckAndMutateRowResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='predicate_matched', full_name='google.bigtable.v1.CheckAndMutateRowResponse.predicate_matched', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1305,
serialized_end=1359,
)
_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor(
name='ReadModifyWriteRowRequest',
full_name='google.bigtable.v1.ReadModifyWriteRowRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='row_key', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.row_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.rules', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1361,
serialized_end=1481,
)
_READROWSREQUEST.fields_by_name['row_range'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWRANGE
_READROWSREQUEST.fields_by_name['row_set'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWSET
_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER
_READROWSREQUEST.oneofs_by_name['target'].fields.append(
_READROWSREQUEST.fields_by_name['row_key'])
_READROWSREQUEST.fields_by_name['row_key'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target']
_READROWSREQUEST.oneofs_by_name['target'].fields.append(
_READROWSREQUEST.fields_by_name['row_range'])
_READROWSREQUEST.fields_by_name['row_range'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target']
_READROWSREQUEST.oneofs_by_name['target'].fields.append(
_READROWSREQUEST.fields_by_name['row_set'])
_READROWSREQUEST.fields_by_name['row_set'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target']
_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._FAMILY
_READROWSRESPONSE_CHUNK.containing_type = _READROWSRESPONSE
_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append(
_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'])
_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk']
_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append(
_READROWSRESPONSE_CHUNK.fields_by_name['reset_row'])
_READROWSRESPONSE_CHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk']
_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append(
_READROWSRESPONSE_CHUNK.fields_by_name['commit_row'])
_READROWSRESPONSE_CHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk']
_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CHUNK
_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST
_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY
_MUTATEROWSRESPONSE.fields_by_name['statuses'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER
_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION
_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._READMODIFYWRITERULE
DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST
DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE
DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST
DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE
DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST
DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST
DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE
DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST
DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE
DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST
ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict(
DESCRIPTOR = _READROWSREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsRequest)
))
_sym_db.RegisterMessage(ReadRowsRequest)
ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict(
Chunk = _reflection.GeneratedProtocolMessageType('Chunk', (_message.Message,), dict(
DESCRIPTOR = _READROWSRESPONSE_CHUNK,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse.Chunk)
))
,
DESCRIPTOR = _READROWSRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse)
))
_sym_db.RegisterMessage(ReadRowsResponse)
_sym_db.RegisterMessage(ReadRowsResponse.Chunk)
SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict(
DESCRIPTOR = _SAMPLEROWKEYSREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysRequest)
))
_sym_db.RegisterMessage(SampleRowKeysRequest)
SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict(
DESCRIPTOR = _SAMPLEROWKEYSRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysResponse)
))
_sym_db.RegisterMessage(SampleRowKeysResponse)
MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATEROWREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowRequest)
))
_sym_db.RegisterMessage(MutateRowRequest)
MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict(
Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict(
DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest.Entry)
))
,
DESCRIPTOR = _MUTATEROWSREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest)
))
_sym_db.RegisterMessage(MutateRowsRequest)
_sym_db.RegisterMessage(MutateRowsRequest.Entry)
MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATEROWSRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsResponse)
))
_sym_db.RegisterMessage(MutateRowsResponse)
CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict(
DESCRIPTOR = _CHECKANDMUTATEROWREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowRequest)
))
_sym_db.RegisterMessage(CheckAndMutateRowRequest)
CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict(
DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowResponse)
))
_sym_db.RegisterMessage(CheckAndMutateRowResponse)
ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict(
DESCRIPTOR = _READMODIFYWRITEROWREQUEST,
__module__ = 'google.bigtable.v1.bigtable_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadModifyWriteRowRequest)
))
_sym_db.RegisterMessage(ReadModifyWriteRowRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\034BigtableServiceMessagesProtoP\001')
# @@protoc_insertion_point(module_scope)
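A quick usage sketch (not part of the generated file above): the classes this module registers behave like ordinary protobuf messages, so a ReadRowsRequest can be built, serialized, and parsed back directly. The table path and row key below are placeholder values, and the import path follows the package layout already used in this diff.

# Usage sketch only -- not part of the generated module above.
from gcloud.bigtable._generated import bigtable_service_messages_pb2 as messages_pb2

# table_name and row_key are placeholder values; row_key, row_range and row_set
# are members of the "target" oneof, so setting row_key clears the other two.
request = messages_pb2.ReadRowsRequest(
    table_name='projects/<project>/zones/<zone>/clusters/<cluster>/tables/<table>',
    row_key=b'row-0001',
    num_rows_limit=1,
)
wire_bytes = request.SerializeToString()                      # wire-format bytes
parsed = messages_pb2.ReadRowsRequest.FromString(wire_bytes)  # parse them back
assert parsed.row_key == b'row-0001'
assert parsed.WhichOneof('target') == 'row_key'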

View file

@@ -0,0 +1,167 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/v1/bigtable_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2
from gcloud.bigtable._generated import bigtable_service_messages_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/v1/bigtable_service.proto',
package='google.bigtable.v1',
syntax='proto3',
serialized_pb=b'\n)google/bigtable/v1/bigtable_service.proto\x12\x12google.bigtable.v1\x1a\x1cgoogle/api/annotations.proto\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x32google/bigtable/v1/bigtable_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\xdd\x08\n\x0f\x42igtableService\x12\xa5\x01\n\x08ReadRows\x12#.google.bigtable.v1.ReadRowsRequest\x1a$.google.bigtable.v1.ReadRowsResponse\"L\x82\xd3\xe4\x93\x02\x46\"A/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read:\x01*0\x01\x12\xb7\x01\n\rSampleRowKeys\x12(.google.bigtable.v1.SampleRowKeysRequest\x1a).google.bigtable.v1.SampleRowKeysResponse\"O\x82\xd3\xe4\x93\x02I\x12G/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys0\x01\x12\xa3\x01\n\tMutateRow\x12$.google.bigtable.v1.MutateRowRequest\x1a\x16.google.protobuf.Empty\"X\x82\xd3\xe4\x93\x02R\"M/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate:\x01*\x12\xaa\x01\n\nMutateRows\x12%.google.bigtable.v1.MutateRowsRequest\x1a&.google.bigtable.v1.MutateRowsResponse\"M\x82\xd3\xe4\x93\x02G\"B/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows:\x01*\x12\xd2\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v1.CheckAndMutateRowRequest\x1a-.google.bigtable.v1.CheckAndMutateRowResponse\"`\x82\xd3\xe4\x93\x02Z\"U/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate:\x01*\x12\xbf\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v1.ReadModifyWriteRowRequest\x1a\x17.google.bigtable.v1.Row\"a\x82\xd3\xe4\x93\x02[\"V/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite:\x01*B4\n\x16\x63om.google.bigtable.v1B\x15\x42igtableServicesProtoP\x01\x88\x01\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\025BigtableServicesProtoP\001\210\001\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaBigtableServiceServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ReadRows(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def SampleRowKeys(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def MutateRow(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def MutateRows(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CheckAndMutateRow(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ReadModifyWriteRow(self, request, context):
raise NotImplementedError()
class BetaBigtableServiceStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def ReadRows(self, request, timeout):
raise NotImplementedError()
@abc.abstractmethod
def SampleRowKeys(self, request, timeout):
raise NotImplementedError()
@abc.abstractmethod
def MutateRow(self, request, timeout):
raise NotImplementedError()
MutateRow.future = None
@abc.abstractmethod
def MutateRows(self, request, timeout):
raise NotImplementedError()
MutateRows.future = None
@abc.abstractmethod
def CheckAndMutateRow(self, request, timeout):
raise NotImplementedError()
CheckAndMutateRow.future = None
@abc.abstractmethod
def ReadModifyWriteRow(self, request, timeout):
raise NotImplementedError()
ReadModifyWriteRow.future = None
def beta_create_BigtableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import google.protobuf.empty_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_data_pb2
request_deserializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.FromString,
('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.FromString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.FromString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.FromString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.FromString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.FromString,
}
response_serializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.SerializeToString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.SerializeToString,
}
method_implementations = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow),
('google.bigtable.v1.BigtableService', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow),
('google.bigtable.v1.BigtableService', 'MutateRows'): face_utilities.unary_unary_inline(servicer.MutateRows),
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow),
('google.bigtable.v1.BigtableService', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows),
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_BigtableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import google.protobuf.empty_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_service_messages_pb2
import gcloud.bigtable._generated.bigtable_data_pb2
request_serializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.SerializeToString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.SerializeToString,
}
response_deserializers = {
('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.FromString,
('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.FromString,
('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.FromString,
('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.FromString,
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.FromString,
}
cardinalities = {
'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY,
'MutateRow': cardinality.Cardinality.UNARY_UNARY,
'MutateRows': cardinality.Cardinality.UNARY_UNARY,
'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY,
'ReadRows': cardinality.Cardinality.UNARY_STREAM,
'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.bigtable.v1.BigtableService', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
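A hedged usage sketch (not part of the generated file above): wiring the generated beta stub to a channel. The host, port, and request values are placeholders, the module is assumed to be importable as gcloud.bigtable._generated.bigtable_service_pb2 following the layout used elsewhere in this diff, and implementations.insecure_channel is assumed to be available in this grpc.beta release.

# Usage sketch only -- not part of the generated module above.
from grpc.beta import implementations
from gcloud.bigtable._generated import bigtable_service_pb2 as service_pb2
from gcloud.bigtable._generated import bigtable_service_messages_pb2 as messages_pb2

# Placeholder endpoint; real code would use the Bigtable endpoint and credentials.
channel = implementations.insecure_channel('bigtable.example.com', 443)
stub = service_pb2.beta_create_BigtableService_stub(channel)

request = messages_pb2.SampleRowKeysRequest(
    table_name='projects/<project>/zones/<zone>/clusters/<cluster>/tables/<table>')
# SampleRowKeys is unary-stream, so the call returns an iterator of responses.
for response in stub.SampleRowKeys(request, 30):
    print(response.row_key, response.offset_bytes)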

View file

@@ -0,0 +1,377 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_data.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_data.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\n8google/bigtable/admin/table/v1/bigtable_table_data.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xfd\x02\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x11\x63urrent_operation\x18\x02 \x01(\x0b\x32\x1d.google.longrunning.Operation\x12R\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x39.google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry\x12O\n\x0bgranularity\x18\x04 \x01(\x0e\x32:.google.bigtable.admin.table.v1.Table.TimestampGranularity\x1a\x63\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily:\x02\x38\x01\"\"\n\x14TimestampGranularity\x12\n\n\x06MILLIS\x10\x00\"l\n\x0c\x43olumnFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rgc_expression\x18\x02 \x01(\t\x12\x37\n\x07gc_rule\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\"\xed\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12K\n\x0cintersection\x18\x03 \x01(\x0b\x32\x33.google.bigtable.admin.table.v1.GcRule.IntersectionH\x00\x12=\n\x05union\x18\x04 \x01(\x0b\x32,.google.bigtable.admin.table.v1.GcRule.UnionH\x00\x1a\x45\n\x0cIntersection\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\x1a>\n\x05Union\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRuleB\x06\n\x04ruleB>\n\"com.google.bigtable.admin.table.v1B\x16\x42igtableTableDataProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor(
name='TimestampGranularity',
full_name='google.bigtable.admin.table.v1.Table.TimestampGranularity',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MILLIS', index=0, number=0,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=509,
serialized_end=543,
)
_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY)
_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor(
name='ColumnFamiliesEntry',
full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=408,
serialized_end=507,
)
_TABLE = _descriptor.Descriptor(
name='Table',
full_name='google.bigtable.admin.table.v1.Table',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.Table.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='current_operation', full_name='google.bigtable.admin.table.v1.Table.current_operation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_families', full_name='google.bigtable.admin.table.v1.Table.column_families', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='granularity', full_name='google.bigtable.admin.table.v1.Table.granularity', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TABLE_COLUMNFAMILIESENTRY, ],
enum_types=[
_TABLE_TIMESTAMPGRANULARITY,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=543,
)
_COLUMNFAMILY = _descriptor.Descriptor(
name='ColumnFamily',
full_name='google.bigtable.admin.table.v1.ColumnFamily',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.ColumnFamily.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gc_expression', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_expression', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gc_rule', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_rule', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=545,
serialized_end=653,
)
_GCRULE_INTERSECTION = _descriptor.Descriptor(
name='Intersection',
full_name='google.bigtable.admin.table.v1.GcRule.Intersection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Intersection.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=880,
serialized_end=949,
)
_GCRULE_UNION = _descriptor.Descriptor(
name='Union',
full_name='google.bigtable.admin.table.v1.GcRule.Union',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Union.rules', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=951,
serialized_end=1013,
)
_GCRULE = _descriptor.Descriptor(
name='GcRule',
full_name='google.bigtable.admin.table.v1.GcRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='max_num_versions', full_name='google.bigtable.admin.table.v1.GcRule.max_num_versions', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_age', full_name='google.bigtable.admin.table.v1.GcRule.max_age', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='intersection', full_name='google.bigtable.admin.table.v1.GcRule.intersection', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='union', full_name='google.bigtable.admin.table.v1.GcRule.union', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='rule', full_name='google.bigtable.admin.table.v1.GcRule.rule',
index=0, containing_type=None, fields=[]),
],
serialized_start=656,
serialized_end=1021,
)
_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY
_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE
_TABLE.fields_by_name['current_operation'].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION
_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY
_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY
_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE
_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE
_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE
_GCRULE_INTERSECTION.containing_type = _GCRULE
_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE
_GCRULE_UNION.containing_type = _GCRULE
_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION
_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['max_num_versions'])
_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['max_age'])
_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['intersection'])
_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule']
_GCRULE.oneofs_by_name['rule'].fields.append(
_GCRULE.fields_by_name['union'])
_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule']
DESCRIPTOR.message_types_by_name['Table'] = _TABLE
DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY
DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE
Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict(
ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict(
DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry)
))
,
DESCRIPTOR = _TABLE,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table)
))
_sym_db.RegisterMessage(Table)
_sym_db.RegisterMessage(Table.ColumnFamiliesEntry)
ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict(
DESCRIPTOR = _COLUMNFAMILY,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ColumnFamily)
))
_sym_db.RegisterMessage(ColumnFamily)
GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict(
Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict(
DESCRIPTOR = _GCRULE_INTERSECTION,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Intersection)
))
,
Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict(
DESCRIPTOR = _GCRULE_UNION,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Union)
))
,
DESCRIPTOR = _GCRULE,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule)
))
_sym_db.RegisterMessage(GcRule)
_sym_db.RegisterMessage(GcRule.Intersection)
_sym_db.RegisterMessage(GcRule.Union)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\026BigtableTableDataProtoP\001')
_TABLE_COLUMNFAMILIESENTRY.has_options = True
_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
# @@protoc_insertion_point(module_scope)
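A brief usage sketch (not part of the generated file above): the table-data messages compose like ordinary protobuf objects, including the column_families map and the GcRule "rule" oneof. Names below are placeholder values.

# Usage sketch only -- not part of the generated module above.
from gcloud.bigtable._generated import bigtable_table_data_pb2 as table_data_pb2

table = table_data_pb2.Table(
    name='projects/<project>/zones/<zone>/clusters/<cluster>/tables/<table>')
# column_families is a map<string, ColumnFamily>; accessing a key creates the entry.
family = table.column_families['cf1']
family.gc_rule.max_num_versions = 3   # selects the max_num_versions arm of the "rule" oneof
assert family.gc_rule.WhichOneof('rule') == 'max_num_versions'
assert table.granularity == table_data_pb2.Table.MILLIS  # enum default (0)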

View file

@@ -0,0 +1,389 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_service_messages.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_service_messages.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\nDgoogle/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\"\x86\x01\n\x12\x43reateTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x34\n\x05table\x18\x03 \x01(\x0b\x32%.google.bigtable.admin.table.v1.Table\x12\x1a\n\x12initial_split_keys\x18\x04 \x03(\t\"!\n\x11ListTablesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"K\n\x12ListTablesResponse\x12\x35\n\x06tables\x18\x01 \x03(\x0b\x32%.google.bigtable.admin.table.v1.Table\"\x1f\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"2\n\x12RenameTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06new_id\x18\x02 \x01(\t\"\x88\x01\n\x19\x43reateColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_family_id\x18\x02 \x01(\t\x12\x43\n\rcolumn_family\x18\x03 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily\")\n\x19\x44\x65leteColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\tBI\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CREATETABLEREQUEST = _descriptor.Descriptor(
name='CreateTableRequest',
full_name='google.bigtable.admin.table.v1.CreateTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.CreateTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table_id', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='table', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='initial_split_keys', full_name='google.bigtable.admin.table.v1.CreateTableRequest.initial_split_keys', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=163,
serialized_end=297,
)
_LISTTABLESREQUEST = _descriptor.Descriptor(
name='ListTablesRequest',
full_name='google.bigtable.admin.table.v1.ListTablesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.ListTablesRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=299,
serialized_end=332,
)
_LISTTABLESRESPONSE = _descriptor.Descriptor(
name='ListTablesResponse',
full_name='google.bigtable.admin.table.v1.ListTablesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tables', full_name='google.bigtable.admin.table.v1.ListTablesResponse.tables', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=334,
serialized_end=409,
)
_GETTABLEREQUEST = _descriptor.Descriptor(
name='GetTableRequest',
full_name='google.bigtable.admin.table.v1.GetTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.GetTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=411,
serialized_end=442,
)
_DELETETABLEREQUEST = _descriptor.Descriptor(
name='DeleteTableRequest',
full_name='google.bigtable.admin.table.v1.DeleteTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.DeleteTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=444,
serialized_end=478,
)
_RENAMETABLEREQUEST = _descriptor.Descriptor(
name='RenameTableRequest',
full_name='google.bigtable.admin.table.v1.RenameTableRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.RenameTableRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='new_id', full_name='google.bigtable.admin.table.v1.RenameTableRequest.new_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=480,
serialized_end=530,
)
_CREATECOLUMNFAMILYREQUEST = _descriptor.Descriptor(
name='CreateColumnFamilyRequest',
full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_family_id', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='column_family', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=533,
serialized_end=669,
)
_DELETECOLUMNFAMILYREQUEST = _descriptor.Descriptor(
name='DeleteColumnFamilyRequest',
full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=671,
serialized_end=712,
)
_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE
_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE
_CREATECOLUMNFAMILYREQUEST.fields_by_name['column_family'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._COLUMNFAMILY
DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST
DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST
DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE
DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST
DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST
DESCRIPTOR.message_types_by_name['RenameTableRequest'] = _RENAMETABLEREQUEST
DESCRIPTOR.message_types_by_name['CreateColumnFamilyRequest'] = _CREATECOLUMNFAMILYREQUEST
DESCRIPTOR.message_types_by_name['DeleteColumnFamilyRequest'] = _DELETECOLUMNFAMILYREQUEST
CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateTableRequest)
))
_sym_db.RegisterMessage(CreateTableRequest)
ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesRequest)
))
_sym_db.RegisterMessage(ListTablesRequest)
ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTTABLESRESPONSE,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesResponse)
))
_sym_db.RegisterMessage(ListTablesResponse)
GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict(
DESCRIPTOR = _GETTABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GetTableRequest)
))
_sym_db.RegisterMessage(GetTableRequest)
DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteTableRequest)
))
_sym_db.RegisterMessage(DeleteTableRequest)
RenameTableRequest = _reflection.GeneratedProtocolMessageType('RenameTableRequest', (_message.Message,), dict(
DESCRIPTOR = _RENAMETABLEREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.RenameTableRequest)
))
_sym_db.RegisterMessage(RenameTableRequest)
CreateColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('CreateColumnFamilyRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATECOLUMNFAMILYREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateColumnFamilyRequest)
))
_sym_db.RegisterMessage(CreateColumnFamilyRequest)
DeleteColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('DeleteColumnFamilyRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETECOLUMNFAMILYREQUEST,
__module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2'
# @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteColumnFamilyRequest)
))
_sym_db.RegisterMessage(DeleteColumnFamilyRequest)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\001')
# @@protoc_insertion_point(module_scope)
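# --- Editor's illustrative sketch (not part of the protoc output) ---
# A minimal, hedged example of building and round-tripping one of the request
# messages registered above; the table path below is hypothetical.
if __name__ == '__main__':
  _request = CreateColumnFamilyRequest(
      name='projects/p/zones/z/clusters/c/tables/t',  # hypothetical table path
      column_family_id='cf1')
  _wire_bytes = _request.SerializeToString()  # protobuf wire-format bytes
  assert CreateColumnFamilyRequest.FromString(_wire_bytes) == _request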

View file

@@ -0,0 +1,203 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bigtable/admin/table/v1/bigtable_table_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2
from gcloud.bigtable._generated import bigtable_table_service_messages_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/bigtable/admin/table/v1/bigtable_table_service.proto',
package='google.bigtable.admin.table.v1',
syntax='proto3',
serialized_pb=b'\n;google/bigtable/admin/table/v1/bigtable_table_service.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\x1a\x44google/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\x89\x0b\n\x14\x42igtableTableService\x12\xa4\x01\n\x0b\x43reateTable\x12\x32.google.bigtable.admin.table.v1.CreateTableRequest\x1a%.google.bigtable.admin.table.v1.Table\":\x82\xd3\xe4\x93\x02\x34\"//v1/{name=projects/*/zones/*/clusters/*}/tables:\x01*\x12\xac\x01\n\nListTables\x12\x31.google.bigtable.admin.table.v1.ListTablesRequest\x1a\x32.google.bigtable.admin.table.v1.ListTablesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{name=projects/*/zones/*/clusters/*}/tables\x12\x9d\x01\n\x08GetTable\x12/.google.bigtable.admin.table.v1.GetTableRequest\x1a%.google.bigtable.admin.table.v1.Table\"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x94\x01\n\x0b\x44\x65leteTable\x12\x32.google.bigtable.admin.table.v1.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"9\x82\xd3\xe4\x93\x02\x33*1/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x9e\x01\n\x0bRenameTable\x12\x32.google.bigtable.admin.table.v1.RenameTableRequest\x1a\x16.google.protobuf.Empty\"C\x82\xd3\xe4\x93\x02=\"8/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename:\x01*\x12\xca\x01\n\x12\x43reateColumnFamily\x12\x39.google.bigtable.admin.table.v1.CreateColumnFamilyRequest\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"K\x82\xd3\xe4\x93\x02\x45\"@/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies:\x01*\x12\xbf\x01\n\x12UpdateColumnFamily\x12,.google.bigtable.admin.table.v1.ColumnFamily\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"M\x82\xd3\xe4\x93\x02G\x1a\x42/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}:\x01*\x12\xb3\x01\n\x12\x44\x65leteColumnFamily\x12\x39.google.bigtable.admin.table.v1.DeleteColumnFamilyRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44*B/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}BB\n\"com.google.bigtable.admin.table.v1B\x1a\x42igtableTableServicesProtoP\x01\x62\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\032BigtableTableServicesProtoP\001')
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaBigtableTableServiceServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def CreateTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ListTables(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def GetTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def RenameTable(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CreateColumnFamily(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def UpdateColumnFamily(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteColumnFamily(self, request, context):
raise NotImplementedError()
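# Editor's sketch: a concrete servicer subclasses the abstract class above and
# overrides only the RPCs it actually serves; the rest keep raising
# NotImplementedError. ListTablesResponse comes from the generated messages
# module imported at the top of this file under its long protoc alias.
class _ExampleTableServicer(BetaBigtableTableServiceServicer):
  """Hypothetical servicer that answers ListTables with an empty result."""
  def ListTables(self, request, context):
    return google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2.ListTablesResponse()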
class BetaBigtableTableServiceStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def CreateTable(self, request, timeout):
raise NotImplementedError()
CreateTable.future = None
@abc.abstractmethod
def ListTables(self, request, timeout):
raise NotImplementedError()
ListTables.future = None
@abc.abstractmethod
def GetTable(self, request, timeout):
raise NotImplementedError()
GetTable.future = None
@abc.abstractmethod
def DeleteTable(self, request, timeout):
raise NotImplementedError()
DeleteTable.future = None
@abc.abstractmethod
def RenameTable(self, request, timeout):
raise NotImplementedError()
RenameTable.future = None
@abc.abstractmethod
def CreateColumnFamily(self, request, timeout):
raise NotImplementedError()
CreateColumnFamily.future = None
@abc.abstractmethod
def UpdateColumnFamily(self, request, timeout):
raise NotImplementedError()
UpdateColumnFamily.future = None
@abc.abstractmethod
def DeleteColumnFamily(self, request, timeout):
raise NotImplementedError()
DeleteColumnFamily.future = None
def beta_create_BigtableTableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import google.protobuf.empty_pb2
request_deserializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
}
response_serializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
}
method_implementations = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): face_utilities.unary_unary_inline(servicer.CreateColumnFamily),
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): face_utilities.unary_unary_inline(servicer.DeleteColumnFamily),
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables),
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): face_utilities.unary_unary_inline(servicer.RenameTable),
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): face_utilities.unary_unary_inline(servicer.UpdateColumnFamily),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
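# Editor's sketch: wiring the factory above to a servicer and serving on a local
# port. The address, port, and helper name are assumptions for illustration; the
# beta server object is expected to expose add_insecure_port/start/stop.
def _example_serve(servicer, port=50051):
  """Hypothetical helper that starts an insecure beta server for `servicer`."""
  server = beta_create_BigtableTableService_server(servicer)
  server.add_insecure_port('[::]:%d' % port)  # assumed local, unauthenticated endpoint
  server.start()
  return server  # caller is responsible for calling server.stop(grace)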
def beta_create_BigtableTableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  import gcloud.bigtable._generated.bigtable_table_service_messages_pb2
  import gcloud.bigtable._generated.bigtable_table_data_pb2
  import google.protobuf.empty_pb2
request_serializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.SerializeToString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString,
}
response_deserializers = {
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.FromString,
('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString,
}
cardinalities = {
'CreateColumnFamily': cardinality.Cardinality.UNARY_UNARY,
'CreateTable': cardinality.Cardinality.UNARY_UNARY,
'DeleteColumnFamily': cardinality.Cardinality.UNARY_UNARY,
'DeleteTable': cardinality.Cardinality.UNARY_UNARY,
'GetTable': cardinality.Cardinality.UNARY_UNARY,
'ListTables': cardinality.Cardinality.UNARY_UNARY,
'RenameTable': cardinality.Cardinality.UNARY_UNARY,
'UpdateColumnFamily': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.table.v1.BigtableTableService', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
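# --- Editor's illustrative sketch (not part of the protoc output) ---
# Creating a stub over an insecure channel and issuing one unary call; the host,
# port, timeout, and cluster path are assumptions for illustration only.
if __name__ == '__main__':
  _channel = beta_implementations.insecure_channel('localhost', 50051)
  _stub = beta_create_BigtableTableService_stub(_channel)
  _list_request = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2.ListTablesRequest(
      name='projects/p/zones/z/clusters/c')  # hypothetical cluster path
  print(_stub.ListTables(_list_request, 10))  # 10-second timeout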

View file

@@ -0,0 +1,100 @@
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaOperationsServicer(object):
"""<fill me in later!>"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetOperation(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def ListOperations(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def CancelOperation(self, request, context):
raise NotImplementedError()
@abc.abstractmethod
def DeleteOperation(self, request, context):
raise NotImplementedError()
class BetaOperationsStub(object):
"""The interface to which stubs will conform."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def GetOperation(self, request, timeout):
raise NotImplementedError()
GetOperation.future = None
@abc.abstractmethod
def ListOperations(self, request, timeout):
raise NotImplementedError()
ListOperations.future = None
@abc.abstractmethod
def CancelOperation(self, request, timeout):
raise NotImplementedError()
CancelOperation.future = None
@abc.abstractmethod
def DeleteOperation(self, request, timeout):
raise NotImplementedError()
DeleteOperation.future = None
def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  import google.longrunning.operations_pb2
  import google.protobuf.empty_pb2
request_deserializers = {
('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.FromString,
('google.longrunning.Operations', 'DeleteOperation'): google.longrunning.operations_pb2.DeleteOperationRequest.FromString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.FromString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.FromString,
}
response_serializers = {
('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.SerializeToString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.SerializeToString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.SerializeToString,
}
method_implementations = {
('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation),
('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation),
('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation),
('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  import google.longrunning.operations_pb2
  import google.protobuf.empty_pb2
request_serializers = {
('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.SerializeToString,
('google.longrunning.Operations', 'DeleteOperation'): google.longrunning.operations_pb2.DeleteOperationRequest.SerializeToString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.SerializeToString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.SerializeToString,
}
response_deserializers = {
('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.FromString,
('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.FromString,
('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.FromString,
('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.FromString,
}
cardinalities = {
'CancelOperation': cardinality.Cardinality.UNARY_UNARY,
'DeleteOperation': cardinality.Cardinality.UNARY_UNARY,
'GetOperation': cardinality.Cardinality.UNARY_UNARY,
'ListOperations': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options)
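# --- Editor's illustrative sketch (not part of the generated output) ---
# Polling a long-running operation through the stub factory above; the endpoint
# and operation name are assumptions for illustration only.
if __name__ == '__main__':
  import google.longrunning.operations_pb2 as _operations_pb2
  _channel = beta_implementations.insecure_channel('localhost', 50051)
  _stub = beta_create_Operations_stub(_channel)
  _get_request = _operations_pb2.GetOperationRequest(name='operations/example-op')  # hypothetical name
  _operation = _stub.GetOperation(_get_request, 10)  # 10-second timeout
  print(_operation.done)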