diff --git a/e2e.py b/e2e.py
new file mode 100644
index 0000000..76ec945
--- /dev/null
+++ b/e2e.py
@@ -0,0 +1,74 @@
+"""Simple Bigtable client demonstrating the auto-generated veneer.
+"""
+
+import argparse
+import logging
+
+from google.bigtable.v1 import bigtable_service_api
+from google.bigtable.admin.cluster.v1 import bigtable_cluster_service_api
+from google.bigtable.admin.table.v1 import bigtable_table_service_api
+
+from google.bigtable.v1 import bigtable_data_pb2 as data
+from google.bigtable.admin.cluster.v1 import bigtable_cluster_data_pb2 as cluster_data
+from google.bigtable.admin.table.v1 import bigtable_table_data_pb2 as table_data
+
+
+def run(project_id):
+    with bigtable_service_api.BigtableServiceApi() as bigtable_api, \
+            bigtable_cluster_service_api.BigtableClusterServiceApi() as cluster_api, \
+            bigtable_table_service_api.BigtableTableServiceApi() as table_api:
+
+        disp_name = 'my-cluster'
+        zone_name = 'projects/{0}/zones/{1}'.format(project_id, 'us-central1-c')
+        employee_id = 'employee1'
+
+        try:
+            print 'Creating a cluster.'
+            cluster = cluster_data.Cluster(display_name=disp_name, serve_nodes=3)
+            cluster_name = cluster_api.create_cluster(
+                name=zone_name, cluster_id=disp_name, cluster=cluster).name
+            print 'Successfully created a cluster named {0}'.format(cluster_name)
+
+            print 'Creating a table.'
+            table_name = table_api.create_table(
+                table=table_data.Table(), name=cluster_name, table_id='my-table').name
+            name_column_family = table_api.create_column_family(
+                name=table_name, column_family_id='Name',
+                column_family=table_data.ColumnFamily())
+            bday_column_family = table_api.create_column_family(
+                name=table_name, column_family_id='Birthday',
+                column_family=table_data.ColumnFamily())
+            print 'Successfully created a table named {0}'.format(table_name)
+
+            print 'Writing some data to the table.'
+            rule1 = data.ReadModifyWriteRule(
+                family_name='Name', column_qualifier='First Name',
+                append_value='Jane')
+            rule2 = data.ReadModifyWriteRule(
+                family_name='Name', column_qualifier='Last Name', append_value='Doe')
+            rule3 = data.ReadModifyWriteRule(
+                family_name='Birthday', column_qualifier='date',
+                append_value='Feb. 29')
+            bigtable_api.read_modify_write_row(
+                table_name=table_name, row_key=employee_id,
+                rules=[rule1, rule2, rule3])
+
+            print 'Reading the data we wrote to the table.'
+            for response in bigtable_api.read_rows(
+                    table_name=table_name, row_key=employee_id):
+                print response
+
+            print 'Deleting the table and cluster.'
+            table_api.delete_table(name=table_name)
+            cluster_api.delete_cluster(name=cluster_name)
+
+        except Exception as exception:
+            logging.exception(exception)
+            # gRPC-layer errors carry code/details; other exceptions may not.
+            print 'failed with {0}: {1}'.format(
+                getattr(exception, 'code', ''), getattr(exception, 'details', ''))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--project_id', required=True,
+        help='The numerical ID of the project in which to create the cluster.')
+    args = parser.parse_args()
+    run(args.project_id)
diff --git a/gcloud_bigtable/google/bigtable/admin/cluster/v1/bigtable_cluster_service_api.py b/gcloud_bigtable/google/bigtable/admin/cluster/v1/bigtable_cluster_service_api.py
new file mode 100644
index 0000000..311af32
--- /dev/null
+++ b/gcloud_bigtable/google/bigtable/admin/cluster/v1/bigtable_cluster_service_api.py
@@ -0,0 +1,335 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# EDITING INSTRUCTIONS
+# This file was generated from the file
+# https://github.com/google/googleapis/blob/master/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto,
+# and updates to that file get reflected here through a refresh process.
+# For the short term, the refresh process will only be runnable by Google engineers.
+# Manual additions are allowed because the refresh process performs
+# a 3-way merge in order to preserve those manual additions. In order to not
+# break the refresh process, only certain types of modifications are
+# allowed.
+#
+# Allowed modifications:
+# 1. New methods (these should be added to the end of the class)
+#
+# Happy editing!
+
+from google.bigtable.admin.cluster.v1 import bigtable_cluster_data_pb2
+from google.bigtable.admin.cluster.v1 import bigtable_cluster_service_messages_pb2
+from google.bigtable.admin.cluster.v1 import bigtable_cluster_service_pb2
+from google.gax import api_callable
+from google.gax import api_utils
+from google.gax import page_descriptor
+from google.longrunning import operations_pb2
+from google.protobuf import timestamp_pb2
+
+class BigtableClusterServiceApi(object):
+    """Service for managing zonal Cloud Bigtable resources."""
+
+    # The default address of the Bigtable Cluster Admin service.
+    _SERVICE_ADDRESS = 'bigtableclusteradmin.googleapis.com'
+
+    # The default port of the Bigtable Cluster Admin service.
+ _DEFAULT_SERVICE_PORT = 443 + + # The scopes needed to make gRPC calls to all of the methods defined in + # this service + _ALL_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-platform' + ) + + def __init__( + self, + service_path=_SERVICE_ADDRESS, + port=_DEFAULT_SERVICE_PORT, + channel=None, + ssl_creds=None, + scopes=_ALL_SCOPES, + is_idempotent_retrying=True, + max_attempts=3, + timeout=30): + self.defaults = api_callable.ApiCallableDefaults( + timeout=timeout, + max_attempts=max_attempts, + is_idempotent_retrying=is_idempotent_retrying) + + self.stub = api_utils.create_stub( + bigtable_cluster_service_pb2.beta_create_BigtableClusterService_stub, + service_path, + port, + ssl_creds=ssl_creds, + channel=channel, + scopes=scopes) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def close(self): + del self.stub + + # Page descriptors + + # Service calls + def list_zones( + self, + name='', + **kwargs): + """Lists the supported zones for the given project.""" + + list_zones_request = bigtable_cluster_service_messages_pb2.ListZonesRequest( + name=name, + **kwargs) + return self.list_zones_callable()(list_zones_request) + + def list_zones_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.ListZones, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def get_cluster( + self, + name='', + **kwargs): + """Gets information about a particular cluster.""" + + get_cluster_request = bigtable_cluster_service_messages_pb2.GetClusterRequest( + name=name, + **kwargs) + return self.get_cluster_callable()(get_cluster_request) + + def get_cluster_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.GetCluster, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def list_clusters( + self, + name='', + **kwargs): + """ + Lists all clusters in the given project, along with any zones for which + cluster information could not be retrieved. + """ + + list_clusters_request = bigtable_cluster_service_messages_pb2.ListClustersRequest( + name=name, + **kwargs) + return self.list_clusters_callable()(list_clusters_request) + + def list_clusters_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.ListClusters, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def create_cluster( + self, + name='', + cluster_id='', + cluster=None, + **kwargs): + """ + Creates a cluster and begins preparing it to begin serving. The returned + cluster embeds as its "current_operation" a long-running operation which + can be used to track the progress of turning up the new cluster. + Immediately upon completion of this request: + * The cluster will be readable via the API, with all requested attributes + but no allocated resources. + Until completion of the embedded operation: + * Cancelling the operation will render the cluster immediately unreadable + via the API. + * All other attempts to modify or delete the cluster will be rejected. 
+        Upon completion of the embedded operation:
+        * Billing for all successfully-allocated resources will begin (some types
+        may have lower than the requested levels).
+        * New tables can be created in the cluster.
+        * The cluster's allocated resource levels will be readable via the API.
+        The embedded operation's "metadata" field type is
+        [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata]
+        The embedded operation's "response" field type is
+        [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
+        """
+        if cluster is None:
+            cluster = bigtable_cluster_data_pb2.Cluster()
+        create_cluster_request = bigtable_cluster_service_messages_pb2.CreateClusterRequest(
+            name=name,
+            cluster_id=cluster_id,
+            cluster=cluster,
+            **kwargs)
+        return self.create_cluster_callable()(create_cluster_request)
+
+    def create_cluster_callable(
+            self,
+            is_retrying=None,
+            max_attempts=None):
+        return api_callable.idempotent_callable(
+            self.stub.CreateCluster,
+            is_retrying=is_retrying,
+            max_attempts=max_attempts,
+            defaults=self.defaults)
+
+    def update_cluster(
+            self,
+            name='',
+            delete_time=None,
+            current_operation=None,
+            display_name='',
+            serve_nodes=0,
+            default_storage_type=None,
+            **kwargs):
+        """
+        Updates a cluster, and begins allocating or releasing resources as
+        requested. The returned cluster embeds as its "current_operation" a
+        long-running operation which can be used to track the progress of updating
+        the cluster.
+        Immediately upon completion of this request:
+        * For resource types where a decrease in the cluster's allocation has been
+        requested, billing will be based on the newly-requested level.
+        Until completion of the embedded operation:
+        * Cancelling the operation will set its metadata's "cancelled_at_time",
+        and begin restoring resources to their pre-request values. The operation
+        is guaranteed to succeed at undoing all resource changes, after which
+        point it will terminate with a CANCELLED status.
+        * All other attempts to modify or delete the cluster will be rejected.
+        * Reading the cluster via the API will continue to give the pre-request
+        resource levels.
+        Upon completion of the embedded operation:
+        * Billing will begin for all successfully-allocated resources (some types
+        may have lower than the requested levels).
+        * All newly-reserved resources will be available for serving the cluster's
+        tables.
+        * The cluster's new resource levels will be readable via the API.
+        The embedded operation's "metadata" field type is
+        [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata]
+        The embedded operation's "response" field type is
+        [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
+ """ + if delete_time is None: + delete_time = timestamp_pb2.Timestamp() + if current_operation is None: + current_operation = operations_pb2.Operation() + if default_storage_type is None: + default_storage_type = bigtable_cluster_data_pb2.STORAGE_UNSPECIFIED + cluster = bigtable_cluster_data_pb2.Cluster( + name=name, + delete_time=delete_time, + current_operation=current_operation, + display_name=display_name, + serve_nodes=serve_nodes, + default_storage_type=default_storage_type, + **kwargs) + return self.update_cluster_callable()(cluster) + + def update_cluster_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.UpdateCluster, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def delete_cluster( + self, + name='', + **kwargs): + """ + Marks a cluster and all of its tables for permanent deletion in 7 days. + Immediately upon completion of the request: + * Billing will cease for all of the cluster's reserved resources. + * The cluster's "delete_time" field will be set 7 days in the future. + Soon afterward: + * All tables within the cluster will become unavailable. + Prior to the cluster's "delete_time": + * The cluster can be recovered with a call to UndeleteCluster. + * All other attempts to modify or delete the cluster will be rejected. + At the cluster's "delete_time": + * The cluster and *all of its tables* will immediately and irrevocably + disappear from the API, and their data will be permanently deleted. + """ + + delete_cluster_request = bigtable_cluster_service_messages_pb2.DeleteClusterRequest( + name=name, + **kwargs) + return self.delete_cluster_callable()(delete_cluster_request) + + def delete_cluster_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.DeleteCluster, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def undelete_cluster( + self, + name='', + **kwargs): + """ + Cancels the scheduled deletion of an cluster and begins preparing it to + resume serving. The returned operation will also be embedded as the + cluster's "current_operation". + Immediately upon completion of this request: + * The cluster's "delete_time" field will be unset, protecting it from + automatic deletion. + Until completion of the returned operation: + * The operation cannot be cancelled. + Upon completion of the returned operation: + * Billing for the cluster's resources will resume. + * All tables within the cluster will be available. + [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is + [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + """ + + undelete_cluster_request = bigtable_cluster_service_messages_pb2.UndeleteClusterRequest( + name=name, + **kwargs) + return self.undelete_cluster_callable()(undelete_cluster_request) + + def undelete_cluster_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.UndeleteCluster, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + # ======== + # Manually-added methods: add custom (non-generated) methods after this point. 
+    # ========
diff --git a/gcloud_bigtable/google/bigtable/admin/table/v1/bigtable_table_service_api.py b/gcloud_bigtable/google/bigtable/admin/table/v1/bigtable_table_service_api.py
new file mode 100644
index 0000000..cc67c5b
--- /dev/null
+++ b/gcloud_bigtable/google/bigtable/admin/table/v1/bigtable_table_service_api.py
@@ -0,0 +1,292 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# EDITING INSTRUCTIONS
+# This file was generated from the file
+# https://github.com/google/googleapis/blob/master/google/bigtable/admin/table/v1/bigtable_table_service.proto,
+# and updates to that file get reflected here through a refresh process.
+# For the short term, the refresh process will only be runnable by Google engineers.
+# Manual additions are allowed because the refresh process performs
+# a 3-way merge in order to preserve those manual additions. In order to not
+# break the refresh process, only certain types of modifications are
+# allowed.
+#
+# Allowed modifications:
+# 1. New methods (these should be added to the end of the class)
+#
+# Happy editing!
+
+from google.bigtable.admin.table.v1 import bigtable_table_data_pb2
+from google.bigtable.admin.table.v1 import bigtable_table_service_messages_pb2
+from google.bigtable.admin.table.v1 import bigtable_table_service_pb2
+from google.gax import api_callable
+from google.gax import api_utils
+from google.gax import page_descriptor
+
+class BigtableTableServiceApi(object):
+    """
+    Service for creating, configuring, and deleting Cloud Bigtable tables.
+    Provides access to the table schemas only, not the data stored within the tables.
+    """
+
+    # The default address of the Bigtable Table Admin service.
+    _SERVICE_ADDRESS = 'bigtabletableadmin.googleapis.com'
+
+    # The default port of the Bigtable Table Admin service.
+ _DEFAULT_SERVICE_PORT = 443 + + # The scopes needed to make gRPC calls to all of the methods defined in + # this service + _ALL_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform' + ) + + def __init__( + self, + service_path=_SERVICE_ADDRESS, + port=_DEFAULT_SERVICE_PORT, + channel=None, + ssl_creds=None, + scopes=_ALL_SCOPES, + is_idempotent_retrying=True, + max_attempts=3, + timeout=30): + self.defaults = api_callable.ApiCallableDefaults( + timeout=timeout, + max_attempts=max_attempts, + is_idempotent_retrying=is_idempotent_retrying) + + self.stub = api_utils.create_stub( + bigtable_table_service_pb2.beta_create_BigtableTableService_stub, + service_path, + port, + ssl_creds=ssl_creds, + channel=channel, + scopes=scopes) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def close(self): + del self.stub + + # Page descriptors + + # Service calls + def create_table( + self, + name='', + table_id='', + table=None, + initial_split_keys=None, + **kwargs): + """ + Creates a new table, to be served from a specified cluster. + The table can be created with a full set of initial column families, + specified in the request. + """ + if table is None: + table = bigtable_table_data_pb2.Table() + if initial_split_keys is None: + initial_split_keys = [] + create_table_request = bigtable_table_service_messages_pb2.CreateTableRequest( + name=name, + table_id=table_id, + table=table, + initial_split_keys=initial_split_keys, + **kwargs) + return self.create_table_callable()(create_table_request) + + def create_table_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.CreateTable, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def list_tables( + self, + name='', + **kwargs): + """Lists the names of all tables served from a specified cluster.""" + + list_tables_request = bigtable_table_service_messages_pb2.ListTablesRequest( + name=name, + **kwargs) + return self.list_tables_callable()(list_tables_request) + + def list_tables_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.ListTables, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def get_table( + self, + name='', + **kwargs): + """Gets the schema of the specified table, including its column families.""" + + get_table_request = bigtable_table_service_messages_pb2.GetTableRequest( + name=name, + **kwargs) + return self.get_table_callable()(get_table_request) + + def get_table_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.GetTable, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def delete_table( + self, + name='', + **kwargs): + """Permanently deletes a specified table and all of its data.""" + + delete_table_request = bigtable_table_service_messages_pb2.DeleteTableRequest( + name=name, + **kwargs) + return self.delete_table_callable()(delete_table_request) + + def delete_table_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.DeleteTable, + is_retrying=is_retrying, + 
max_attempts=max_attempts, + defaults=self.defaults) + + def rename_table( + self, + name='', + new_id='', + **kwargs): + """ + Changes the name of a specified table. + Cannot be used to move tables between clusters, zones, or projects. + """ + + rename_table_request = bigtable_table_service_messages_pb2.RenameTableRequest( + name=name, + new_id=new_id, + **kwargs) + return self.rename_table_callable()(rename_table_request) + + def rename_table_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.RenameTable, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def create_column_family( + self, + name='', + column_family_id='', + column_family=None, + **kwargs): + """Creates a new column family within a specified table.""" + if column_family is None: + column_family = bigtable_table_data_pb2.ColumnFamily() + create_column_family_request = bigtable_table_service_messages_pb2.CreateColumnFamilyRequest( + name=name, + column_family_id=column_family_id, + column_family=column_family, + **kwargs) + return self.create_column_family_callable()(create_column_family_request) + + def create_column_family_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.CreateColumnFamily, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def update_column_family( + self, + name='', + gc_expression='', + gc_rule=None, + **kwargs): + """Changes the configuration of a specified column family.""" + if gc_rule is None: + gc_rule = bigtable_table_data_pb2.GcRule() + column_family = bigtable_table_data_pb2.ColumnFamily( + name=name, + gc_expression=gc_expression, + gc_rule=gc_rule, + **kwargs) + return self.update_column_family_callable()(column_family) + + def update_column_family_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.UpdateColumnFamily, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def delete_column_family( + self, + name='', + **kwargs): + """Permanently deletes a specified column family and all of its data.""" + + delete_column_family_request = bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest( + name=name, + **kwargs) + return self.delete_column_family_callable()(delete_column_family_request) + + def delete_column_family_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.DeleteColumnFamily, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + # ======== + # Manually-added methods: add custom (non-generated) methods after this point. + # ======== diff --git a/gcloud_bigtable/google/bigtable/v1/bigtable_service_api.py b/gcloud_bigtable/google/bigtable/v1/bigtable_service_api.py new file mode 100644 index 0000000..2fba147 --- /dev/null +++ b/gcloud_bigtable/google/bigtable/v1/bigtable_service_api.py @@ -0,0 +1,250 @@ +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# EDITING INSTRUCTIONS
+# This file was generated from the file
+# https://github.com/google/googleapis/blob/master/google/bigtable/v1/bigtable_service.proto,
+# and updates to that file get reflected here through a refresh process.
+# For the short term, the refresh process will only be runnable by Google engineers.
+# Manual additions are allowed because the refresh process performs
+# a 3-way merge in order to preserve those manual additions. In order to not
+# break the refresh process, only certain types of modifications are
+# allowed.
+#
+# Allowed modifications:
+# 1. New methods (these should be added to the end of the class)
+#
+# Happy editing!
+
+from google.bigtable.v1 import bigtable_data_pb2
+from google.bigtable.v1 import bigtable_service_messages_pb2
+from google.bigtable.v1 import bigtable_service_pb2
+from google.gax import api_callable
+from google.gax import api_utils
+from google.gax import page_descriptor
+
+class BigtableServiceApi(object):
+    """Service for reading from and writing to existing Bigtables."""
+
+    # The default address of the Bigtable data service.
+    _SERVICE_ADDRESS = 'bigtable.googleapis.com'
+
+    # The default port of the Bigtable data service.
+    _DEFAULT_SERVICE_PORT = 443
+
+    # The scopes needed to make gRPC calls to all of the methods defined in
+    # this service
+    _ALL_SCOPES = (
+        'https://www.googleapis.com/auth/bigtable.data',
+        'https://www.googleapis.com/auth/cloud-bigtable.data',
+        'https://www.googleapis.com/auth/cloud-platform'
+    )
+
+    def __init__(
+            self,
+            service_path=_SERVICE_ADDRESS,
+            port=_DEFAULT_SERVICE_PORT,
+            channel=None,
+            ssl_creds=None,
+            scopes=_ALL_SCOPES,
+            is_idempotent_retrying=True,
+            max_attempts=3,
+            timeout=30):
+        self.defaults = api_callable.ApiCallableDefaults(
+            timeout=timeout,
+            max_attempts=max_attempts,
+            is_idempotent_retrying=is_idempotent_retrying)
+
+        self.stub = api_utils.create_stub(
+            bigtable_service_pb2.beta_create_BigtableService_stub,
+            service_path,
+            port,
+            ssl_creds=ssl_creds,
+            channel=channel,
+            scopes=scopes)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+
+    def close(self):
+        del self.stub
+
+    # Page descriptors
+
+    # Service calls
+    def read_rows(
+            self,
+            table_name='',
+            row_key='',
+            row_range=None,
+            filter=None,
+            allow_row_interleaving=False,
+            num_rows_limit=0L,
+            **kwargs):
+        """
+        Streams back the contents of all requested rows, optionally applying
+        the same Reader filter to each. Depending on their size, rows may be
+        broken up across multiple responses, but atomicity of each row will still
+        be preserved.
+ """ + if row_range is None: + row_range = bigtable_data_pb2.RowRange() + if filter is None: + filter = bigtable_data_pb2.RowFilter() + read_rows_request = bigtable_service_messages_pb2.ReadRowsRequest( + table_name=table_name, + row_key=row_key, + row_range=row_range, + filter=filter, + allow_row_interleaving=allow_row_interleaving, + num_rows_limit=num_rows_limit, + **kwargs) + return self.read_rows_callable()(read_rows_request) + + def read_rows_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.ReadRows, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def sample_row_keys( + self, + table_name='', + **kwargs): + """ + Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + + sample_row_keys_request = bigtable_service_messages_pb2.SampleRowKeysRequest( + table_name=table_name, + **kwargs) + return self.sample_row_keys_callable()(sample_row_keys_request) + + def sample_row_keys_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.SampleRowKeys, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def mutate_row( + self, + table_name='', + row_key='', + mutations=None, + **kwargs): + """ + Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by 'mutation'. + """ + if mutations is None: + mutations = [] + mutate_row_request = bigtable_service_messages_pb2.MutateRowRequest( + table_name=table_name, + row_key=row_key, + mutations=mutations, + **kwargs) + return self.mutate_row_callable()(mutate_row_request) + + def mutate_row_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.MutateRow, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def check_and_mutate_row( + self, + table_name='', + row_key='', + predicate_filter=None, + true_mutations=None, + false_mutations=None, + **kwargs): + """Mutates a row atomically based on the output of a predicate Reader filter.""" + if predicate_filter is None: + predicate_filter = bigtable_data_pb2.RowFilter() + if true_mutations is None: + true_mutations = [] + if false_mutations is None: + false_mutations = [] + check_and_mutate_row_request = bigtable_service_messages_pb2.CheckAndMutateRowRequest( + table_name=table_name, + row_key=row_key, + predicate_filter=predicate_filter, + true_mutations=true_mutations, + false_mutations=false_mutations, + **kwargs) + return self.check_and_mutate_row_callable()(check_and_mutate_row_request) + + def check_and_mutate_row_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.CheckAndMutateRow, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + def read_modify_write_row( + self, + table_name='', + row_key='', + rules=None, + **kwargs): + """ + Modifies a row atomically, reading the latest existing timestamp/value from + the specified columns and writing a new value at + max(existing timestamp, current server time) based on pre-defined + read/modify/write rules. Returns the new contents of all modified cells. 
+ """ + if rules is None: + rules = [] + read_modify_write_row_request = bigtable_service_messages_pb2.ReadModifyWriteRowRequest( + table_name=table_name, + row_key=row_key, + rules=rules, + **kwargs) + return self.read_modify_write_row_callable()(read_modify_write_row_request) + + def read_modify_write_row_callable( + self, + is_retrying=None, + max_attempts=None): + return api_callable.idempotent_callable( + self.stub.ReadModifyWriteRow, + is_retrying=is_retrying, + max_attempts=max_attempts, + defaults=self.defaults) + + # ======== + # Manually-added methods: add custom (non-generated) methods after this point. + # ========