From 8e852877550f81bce536f834a49dccde0904cf1f Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Fri, 12 May 2017 17:48:17 -0700 Subject: [PATCH 01/12] Add Bigtable Autoscaler sample --- bigtable/autoscaler/README.rst | 131 +++++++++++++++++++++++++ bigtable/autoscaler/README.rst.in | 27 +++++ bigtable/autoscaler/autoscaler.py | 130 ++++++++++++++++++++++++ bigtable/autoscaler/autoscaler_test.py | 90 +++++++++++++++++ bigtable/autoscaler/requirements.txt | 2 + bigtable/autoscaler/strategies.py | 50 ++++++++++ bigtable/autoscaler/strategies_test.py | 30 ++++++ 7 files changed, 460 insertions(+) create mode 100644 bigtable/autoscaler/README.rst create mode 100644 bigtable/autoscaler/README.rst.in create mode 100644 bigtable/autoscaler/autoscaler.py create mode 100644 bigtable/autoscaler/autoscaler_test.py create mode 100644 bigtable/autoscaler/requirements.txt create mode 100644 bigtable/autoscaler/strategies.py create mode 100644 bigtable/autoscaler/strategies_test.py diff --git a/bigtable/autoscaler/README.rst b/bigtable/autoscaler/README.rst new file mode 100644 index 00000000000..ee3166b7eef --- /dev/null +++ b/bigtable/autoscaler/README.rst @@ -0,0 +1,131 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Bigtable Python Samples +=============================================================================== + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. + + +This sample demonstrates using `Stackdriver monitoring`_, +to scale Cloud Bigtable based on CPU usage. + +.. Stackdriver Monitoring: http://cloud.google.com/monitoring/docs + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +Authentication is typically done through `Application Default Credentials`_, +which means you do not have to change the code to authenticate as long as +your environment has credentials. You have a few options for setting up +authentication: + +#. When running locally, use the `Google Cloud SDK`_ + + .. code-block:: bash + + gcloud auth application-default login + + +#. When running on App Engine or Compute Engine, credentials are already + set-up. However, you may need to configure your Compute Engine instance + with `additional scopes`_. + +#. You can create a `Service Account key file`_. This file can be used to + authenticate to Google Cloud Platform services from any environment. To use + the file, set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable to + the path to the key file, for example: + + .. code-block:: bash + + export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service_account.json + +.. _Application Default Credentials: https://cloud.google.com/docs/authentication#getting_credentials_for_server-centric_flow +.. _additional scopes: https://cloud.google.com/compute/docs/authentication#using +.. _Service Account key file: https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount + +Install Dependencies +++++++++++++++++++++ + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. 
Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Autoscaling example ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + + + +To run this sample: + +.. code-block:: bash + + $ python autoscaler.py + + usage: autoscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] + [--low_cpu_threshold LOW_CPU_THRESHOLD] + [--high_latency_threshold HIGH_LATENCY_THRESHOLD] + [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] + + Scales Bigtable clusters based on CPU usage. + + optional arguments: + -h, --help show this help message and exit + --high_cpu_threshold HIGH_CPU_THRESHOLD + If Bigtable CPU usages is above this threshold, scale + up + --low_cpu_threshold LOW_CPU_THRESHOLD + If Bigtable CPU usages is above this threshold, scale + up + --high_latency_threshold HIGH_LATENCY_THRESHOLD + If Bigtable CPU usages is above this threshold, scale + up + --short_sleep SHORT_SLEEP + How long to sleep in seconds between checking metrics + after no scale operation + --long_sleep LONG_SLEEP + How long to sleep in seconds between checking metrics + after a scaling operation + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/bigtable/autoscaler/README.rst.in b/bigtable/autoscaler/README.rst.in new file mode 100644 index 00000000000..26129bcdc8a --- /dev/null +++ b/bigtable/autoscaler/README.rst.in @@ -0,0 +1,27 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable + short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. + +description: | + This sample demonstrates using `Stackdriver monitoring`_, + to scale Cloud Bigtable based on CPU usage. + + .. Stackdriver Monitoring: http://cloud.google.com/monitoring/docs + +setup: +- auth +- install_deps + +samples: +- name: Autoscaling example + file: autoscaler.py + show_help: true + +cloud_client_library: true diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py new file mode 100644 index 00000000000..42132beaf1f --- /dev/null +++ b/bigtable/autoscaler/autoscaler.py @@ -0,0 +1,130 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Sample that demonstrates how to use Bigtable Stackdriver metrics to +autoscale Google Cloud Bigtable.""" + +import argparse +import os +import time + +from google.cloud import bigtable +from google.cloud import monitoring + +import strategies + +CPU_METRIC = 'bigtable.googleapis.com/cluster/cpu_load' + + +def get_cpu_load(): + """Returns the most recent Bigtable CPU load measurement.""" + client = monitoring.Client() + query = client.query(CPU_METRIC, minutes=5) + return list(query)[0].points[0].value + + +def scale_bigtable(bigtable_instance, up): + """Scales the number of Bigtable nodes up or down. + + Args: + bigtable_instance (str): Cloud Bigtable instance id to scale + up (bool): If true, scale up, otherwise scale down + """ + bigtable_client = bigtable.Client(admin=True) + instance = bigtable_client.instance(bigtable_instance) + instance.reload() + + cluster = instance.cluster('{}-cluster'.format(bigtable_instance)) + cluster.reload() + + current_node_count = cluster.serve_nodes + + if current_node_count <= 3 and not up: + # Can't downscale lower than 3 nodes + return + + if up: + strategies_dict = strategies.UPSCALE_STRATEGIES + else: + strategies_dict = strategies.DOWNSCALE_STRATEGIES + + strategy = strategies_dict['incremental'] + new_node_count = strategy(cluster.serve_nodes) + cluster.serve_nodes = new_node_count + cluster.update() + print('Scaled from {} up to {} nodes.'.format( + current_node_count, new_node_count)) + + +def main( + bigtable_instance, + high_cpu_threshold, + low_cpu_threshold, + short_sleep, + long_sleep): + """Main loop runner that autoscales Bigtable. + + Args: + bigtable_instance (str): Cloud Bigtable instance id to autoscale + high_cpu_threshold (float): If CPU is higher than this, scale up. + low_cpu_threshold (float): If CPU is higher than this, scale down. 
+ short_sleep (int): How long to sleep after no operation + long_sleep (int): How long to sleep after the cluster nodes are + changed + """ + cluster_cpu = get_cpu_load() + print('Detected cpu of {}'.format(cluster_cpu)) + if cluster_cpu > high_cpu_threshold: + scale_bigtable(bigtable_instance, True) + time.sleep(long_sleep) + elif cluster_cpu < low_cpu_threshold: + scale_bigtable(bigtable_instance, False) + time.sleep(short_sleep) + else: + print('CPU within threshold, sleeping.') + time.sleep(short_sleep) + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Scales Bigtable clusters based on CPU usage.') + parser.add_argument( + 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + parser.add_argument( + '--high_cpu_threshold', + help='If Bigtable CPU usages is above this threshold, scale up', + default=0.6) + parser.add_argument( + '--low_cpu_threshold', + help='If Bigtable CPU usages is above this threshold, scale up', + default=0.2) + parser.add_argument( + '--short_sleep', + help='How long to sleep in seconds between checking metrics after no ' + 'scale operation', + default=60) + parser.add_argument( + '--long_sleep', + help='How long to sleep in seconds between checking metrics after a ' + 'scaling operation', + default=60 * 10) + args = parser.parse_args() + + while True: + main( + args.project_id, + args.bigtable_instance, + float(args.high_cpu_threshold), + float(args.low_cpu_threshold), + int(args.short_sleep), + int(args.long_sleep)) diff --git a/bigtable/autoscaler/autoscaler_test.py b/bigtable/autoscaler/autoscaler_test.py new file mode 100644 index 00000000000..93524fa55c4 --- /dev/null +++ b/bigtable/autoscaler/autoscaler_test.py @@ -0,0 +1,90 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Unit and system tests for autoscaler.py""" + +import os +import time + +from google.cloud import bigtable +from mock import patch + +from autoscaler import get_cpu_load +from autoscaler import main +from autoscaler import scale_bigtable + +BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER'] + +# System tests to verify API calls succeed + + +def test_get_cpu_load(): + assert get_cpu_load() > 0.0 + + +def test_scale_bigtable(): + bigtable_client = bigtable.Client(admin=True) + instance = bigtable_client.instance(BIGTABLE_INSTANCE) + instance.reload() + + cluster = instance.cluster('{}-cluster'.format(BIGTABLE_INSTANCE)) + cluster.reload() + original_node_count = cluster.serve_nodes + + scale_bigtable(BIGTABLE_INSTANCE, True) + + time.sleep(3) + cluster.reload() + + new_node_count = cluster.serve_nodes + assert (new_node_count == (original_node_count + 2)) + + scale_bigtable(BIGTABLE_INSTANCE, False) + time.sleep(3) + cluster.reload() + final_node_count = cluster.serve_nodes + assert final_node_count == original_node_count + + +# Unit test for logic + +@patch('time.sleep') +@patch('autoscaler.get_cpu_load') +@patch('autoscaler.scale_bigtable') +def test_main(scale_bigtable, get_cpu_load, sleep): + SHORT_SLEEP = 5 + LONG_SLEEP = 10 + get_cpu_load.return_value = 0.5 + + main(BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_not_called() + scale_bigtable.reset_mock() + + get_cpu_load.return_value = 0.7 + main(BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, True) + scale_bigtable.reset_mock() + + get_cpu_load.return_value = 0.2 + main(BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, False) + + scale_bigtable.reset_mock() + + + + + + + diff --git a/bigtable/autoscaler/requirements.txt b/bigtable/autoscaler/requirements.txt new file mode 100644 index 00000000000..3f7710cd49d --- /dev/null +++ b/bigtable/autoscaler/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-bigtable==0.23.1 +google-cloud-monitoring==0.24.0 diff --git a/bigtable/autoscaler/strategies.py b/bigtable/autoscaler/strategies.py new file mode 100644 index 00000000000..ef686e8d831 --- /dev/null +++ b/bigtable/autoscaler/strategies.py @@ -0,0 +1,50 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module contains various strategies for upscaling and downscaling +Bigtable.""" + + +def _upscale_incremental_strategy(current_nodes): + """Simple scaling strategy, increase nodes by 2.""" + return current_nodes + 2 + + +UPSCALE_STRATEGIES = { + 'incremental': _upscale_incremental_strategy +} + +"""UPSCALE_STRATEGIES contains a dict of functions for scaling Bigtable. + +The function signature should accept the current number of nodes and return +the new number of nodes to scale to. 
+""" + + +def _downscale_incremental_strategy(current_nodes): + """Simple downscale strategy: decrease nodes by 2.""" + new_nodes = current_nodes - 2 + if new_nodes < 3: # 3 is minimum number of CBT nodes + return 3 + return new_nodes + +DOWNSCALE_STRATEGIES = { + 'incremental': _downscale_incremental_strategy +} + +"""DOWNSCALE_STRATEGIES contains a dict of functions for scaling Bigtable. + +The function signature should accept the current number of nodes and return +the new number of nodes to scale to. +""" diff --git a/bigtable/autoscaler/strategies_test.py b/bigtable/autoscaler/strategies_test.py new file mode 100644 index 00000000000..57bbfd5f9b5 --- /dev/null +++ b/bigtable/autoscaler/strategies_test.py @@ -0,0 +1,30 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for autoscaler.py""" + +from strategies import DOWNSCALE_STRATEGIES +from strategies import UPSCALE_STRATEGIES + + +def test_downscale(): + downscale_strategy = DOWNSCALE_STRATEGIES['incremental'] + assert downscale_strategy(5) == 3 + assert downscale_strategy(4) == 3 + assert downscale_strategy(3) == 3 + + +def test_upscale(): + upscale_strategy = UPSCALE_STRATEGIES['incremental'] + assert upscale_strategy(3) == 5 From dbffb1566ecbeb5e7657e7adc3b6bf309c926c97 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Fri, 12 May 2017 17:49:07 -0700 Subject: [PATCH 02/12] update readme --- bigtable/autoscaler/README.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bigtable/autoscaler/README.rst b/bigtable/autoscaler/README.rst index ee3166b7eef..7ab8608fdbe 100644 --- a/bigtable/autoscaler/README.rst +++ b/bigtable/autoscaler/README.rst @@ -87,11 +87,14 @@ To run this sample: usage: autoscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] - [--high_latency_threshold HIGH_LATENCY_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] + instance_id Scales Bigtable clusters based on CPU usage. + positional arguments: + instance_id ID of the Cloud Bigtable instance to connect to. 
+ optional arguments: -h, --help show this help message and exit --high_cpu_threshold HIGH_CPU_THRESHOLD @@ -100,9 +103,6 @@ To run this sample: --low_cpu_threshold LOW_CPU_THRESHOLD If Bigtable CPU usages is above this threshold, scale up - --high_latency_threshold HIGH_LATENCY_THRESHOLD - If Bigtable CPU usages is above this threshold, scale - up --short_sleep SHORT_SLEEP How long to sleep in seconds between checking metrics after no scale operation From 0ab3bc5d53e6840047b743ca86a75f6f3d949998 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Fri, 12 May 2017 17:51:08 -0700 Subject: [PATCH 03/12] add return type to docstring --- bigtable/autoscaler/autoscaler.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index 42132beaf1f..d4318c19bbe 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -28,7 +28,11 @@ def get_cpu_load(): - """Returns the most recent Bigtable CPU load measurement.""" + """Returns the most recent Bigtable CPU load measurement. + + Returns: + float: The most recent Bigtable CPU usage metric + """ client = monitoring.Client() query = client.query(CPU_METRIC, minutes=5) return list(query)[0].points[0].value From 80786675e368cd39d3591fac139f02e6a9d9d7a0 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Fri, 12 May 2017 17:52:48 -0700 Subject: [PATCH 04/12] remove trailing whitespace --- bigtable/autoscaler/autoscaler_test.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/bigtable/autoscaler/autoscaler_test.py b/bigtable/autoscaler/autoscaler_test.py index 93524fa55c4..29a131fa1c6 100644 --- a/bigtable/autoscaler/autoscaler_test.py +++ b/bigtable/autoscaler/autoscaler_test.py @@ -81,10 +81,3 @@ def test_main(scale_bigtable, get_cpu_load, sleep): scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, False) scale_bigtable.reset_mock() - - - - - - - From dc4b23a646d8872faf3bf4d4389f07af5d77c589 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Fri, 12 May 2017 17:57:40 -0700 Subject: [PATCH 05/12] Fix main call --- bigtable/autoscaler/autoscaler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index d4318c19bbe..388ce1b1b82 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -126,7 +126,6 @@ def main( while True: main( - args.project_id, args.bigtable_instance, float(args.high_cpu_threshold), float(args.low_cpu_threshold), From 1ea09feba816b4992e004a13c279b42290af4410 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Fri, 12 May 2017 17:58:20 -0700 Subject: [PATCH 06/12] fix instance flag name --- bigtable/autoscaler/autoscaler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index 388ce1b1b82..5ade397f1c4 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -103,7 +103,8 @@ def main( parser = argparse.ArgumentParser( description='Scales Bigtable clusters based on CPU usage.') parser.add_argument( - 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + 'bigtable_instance', help='ID of the Cloud Bigtable instance to ' + 'connect to.') parser.add_argument( '--high_cpu_threshold', help='If Bigtable CPU usages is above this threshold, scale up', From bd69e29b3ef8999f5a65d6b0198649d920827e54 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Mon, 15 May 2017 13:37:18 -0700 Subject: [PATCH 07/12] 
style and jonwayne review --- bigtable/autoscaler/autoscaler.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index 5ade397f1c4..848924ebb39 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -16,7 +16,6 @@ autoscale Google Cloud Bigtable.""" import argparse -import os import time from google.cloud import bigtable @@ -24,8 +23,6 @@ import strategies -CPU_METRIC = 'bigtable.googleapis.com/cluster/cpu_load' - def get_cpu_load(): """Returns the most recent Bigtable CPU load measurement. @@ -34,8 +31,10 @@ def get_cpu_load(): float: The most recent Bigtable CPU usage metric """ client = monitoring.Client() - query = client.query(CPU_METRIC, minutes=5) - return list(query)[0].points[0].value + query = client.query('bigtable.googleapis.com/cluster/cpu_load', minutes=5) + time_series = list(query)[0] + recent_time_series = time_series[0] + return recent_time_series.points[0].value def scale_bigtable(bigtable_instance, up): @@ -99,6 +98,7 @@ def main( print('CPU within threshold, sleeping.') time.sleep(short_sleep) + if __name__ == '__main__': parser = argparse.ArgumentParser( description='Scales Bigtable clusters based on CPU usage.') From 7e70fa8f44b238c4ffb95718ee79125899f947a7 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Mon, 15 May 2017 13:59:31 -0700 Subject: [PATCH 08/12] lint and jonwayne --- bigtable/autoscaler/autoscaler.py | 13 +++++++++---- bigtable/autoscaler/strategies.py | 1 + 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index 848924ebb39..5d0fcaba3a3 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -37,12 +37,17 @@ def get_cpu_load(): return recent_time_series.points[0].value -def scale_bigtable(bigtable_instance, up): +def scale_bigtable(bigtable_instance, scale_up): """Scales the number of Bigtable nodes up or down. + Edits the number of nodes in the Bigtable cluster to be increased + or decreased, depending on the `scale_up` boolean argument. Currently + the `incremental` strategy from `strategies.py` is used. 
+ + Args: bigtable_instance (str): Cloud Bigtable instance id to scale - up (bool): If true, scale up, otherwise scale down + scale_up (bool): If true, scale up, otherwise scale down """ bigtable_client = bigtable.Client(admin=True) instance = bigtable_client.instance(bigtable_instance) @@ -53,11 +58,11 @@ def scale_bigtable(bigtable_instance, up): current_node_count = cluster.serve_nodes - if current_node_count <= 3 and not up: + if current_node_count <= 3 and not scale_up: # Can't downscale lower than 3 nodes return - if up: + if scale_up: strategies_dict = strategies.UPSCALE_STRATEGIES else: strategies_dict = strategies.DOWNSCALE_STRATEGIES diff --git a/bigtable/autoscaler/strategies.py b/bigtable/autoscaler/strategies.py index ef686e8d831..9b1c7483b9b 100644 --- a/bigtable/autoscaler/strategies.py +++ b/bigtable/autoscaler/strategies.py @@ -39,6 +39,7 @@ def _downscale_incremental_strategy(current_nodes): return 3 return new_nodes + DOWNSCALE_STRATEGIES = { 'incremental': _downscale_incremental_strategy } From fcc66e36606d1165c24e4a9bef1f1393279fcb3f Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Mon, 15 May 2017 14:40:46 -0700 Subject: [PATCH 09/12] fix tests --- bigtable/autoscaler/autoscaler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index 5d0fcaba3a3..449ca886bef 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -32,7 +32,7 @@ def get_cpu_load(): """ client = monitoring.Client() query = client.query('bigtable.googleapis.com/cluster/cpu_load', minutes=5) - time_series = list(query)[0] + time_series = list(query) recent_time_series = time_series[0] return recent_time_series.points[0].value From c2c3ff86819941dca928b84280feef8c97ebe7bf Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Mon, 15 May 2017 15:12:44 -0700 Subject: [PATCH 10/12] fix tests --- bigtable/autoscaler/autoscaler.py | 14 ++++++++++---- bigtable/autoscaler/autoscaler_test.py | 5 +++-- bigtable/autoscaler/requirements.txt | 1 + 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index 449ca886bef..bcd7ad99c71 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -37,7 +37,7 @@ def get_cpu_load(): return recent_time_series.points[0].value -def scale_bigtable(bigtable_instance, scale_up): +def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): """Scales the number of Bigtable nodes up or down. 
Edits the number of nodes in the Bigtable cluster to be increased @@ -47,13 +47,14 @@ def scale_bigtable(bigtable_instance, scale_up): Args: bigtable_instance (str): Cloud Bigtable instance id to scale + bigtable_cluster (str): Cloud Bigtable cluster id to scale scale_up (bool): If true, scale up, otherwise scale down """ bigtable_client = bigtable.Client(admin=True) instance = bigtable_client.instance(bigtable_instance) instance.reload() - cluster = instance.cluster('{}-cluster'.format(bigtable_instance)) + cluster = instance.cluster(bigtable_cluster) cluster.reload() current_node_count = cluster.serve_nodes @@ -77,6 +78,7 @@ def scale_bigtable(bigtable_instance, scale_up): def main( bigtable_instance, + bigtable_cluster, high_cpu_threshold, low_cpu_threshold, short_sleep, @@ -94,10 +96,10 @@ def main( cluster_cpu = get_cpu_load() print('Detected cpu of {}'.format(cluster_cpu)) if cluster_cpu > high_cpu_threshold: - scale_bigtable(bigtable_instance, True) + scale_bigtable(bigtable_instance, bigtable_cluster, True) time.sleep(long_sleep) elif cluster_cpu < low_cpu_threshold: - scale_bigtable(bigtable_instance, False) + scale_bigtable(bigtable_instance, bigtable_cluster, False) time.sleep(short_sleep) else: print('CPU within threshold, sleeping.') @@ -110,6 +112,9 @@ def main( parser.add_argument( 'bigtable_instance', help='ID of the Cloud Bigtable instance to ' 'connect to.') + parser.add_argument( + 'bigtable_cluster', help='ID of the Cloud Bigtable cluster to ' + 'connect to.') parser.add_argument( '--high_cpu_threshold', help='If Bigtable CPU usages is above this threshold, scale up', @@ -133,6 +138,7 @@ def main( while True: main( args.bigtable_instance, + args.bigtable_cluster, float(args.high_cpu_threshold), float(args.low_cpu_threshold), int(args.short_sleep), diff --git a/bigtable/autoscaler/autoscaler_test.py b/bigtable/autoscaler/autoscaler_test.py index 29a131fa1c6..99d4e2c56cc 100644 --- a/bigtable/autoscaler/autoscaler_test.py +++ b/bigtable/autoscaler/autoscaler_test.py @@ -24,6 +24,7 @@ from autoscaler import main from autoscaler import scale_bigtable +# tests assume instance and cluster have the same ID BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER'] # System tests to verify API calls succeed @@ -38,7 +39,7 @@ def test_scale_bigtable(): instance = bigtable_client.instance(BIGTABLE_INSTANCE) instance.reload() - cluster = instance.cluster('{}-cluster'.format(BIGTABLE_INSTANCE)) + cluster = instance.cluster(BIGTABLE_INSTANCE) cluster.reload() original_node_count = cluster.serve_nodes @@ -50,7 +51,7 @@ def test_scale_bigtable(): new_node_count = cluster.serve_nodes assert (new_node_count == (original_node_count + 2)) - scale_bigtable(BIGTABLE_INSTANCE, False) + scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False) time.sleep(3) cluster.reload() final_node_count = cluster.serve_nodes diff --git a/bigtable/autoscaler/requirements.txt b/bigtable/autoscaler/requirements.txt index 3f7710cd49d..9f91a227496 100644 --- a/bigtable/autoscaler/requirements.txt +++ b/bigtable/autoscaler/requirements.txt @@ -1,2 +1,3 @@ +google-cloud-core==0.24.1 google-cloud-bigtable==0.23.1 google-cloud-monitoring==0.24.0 From 87b06529e1bba166d442d09b7501e4cb10dbdcb4 Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Mon, 15 May 2017 16:32:43 -0700 Subject: [PATCH 11/12] fix tests --- bigtable/autoscaler/autoscaler.py | 8 ++++---- bigtable/autoscaler/autoscaler_test.py | 17 +++++++++++------ bigtable/autoscaler/requirements.txt | 1 - 3 files changed, 15 insertions(+), 11 deletions(-) diff --git 
a/bigtable/autoscaler/autoscaler.py b/bigtable/autoscaler/autoscaler.py index bcd7ad99c71..fb09b6b3693 100644 --- a/bigtable/autoscaler/autoscaler.py +++ b/bigtable/autoscaler/autoscaler.py @@ -110,11 +110,11 @@ def main( parser = argparse.ArgumentParser( description='Scales Bigtable clusters based on CPU usage.') parser.add_argument( - 'bigtable_instance', help='ID of the Cloud Bigtable instance to ' - 'connect to.') + 'bigtable_instance', + help='ID of the Cloud Bigtable instance to connect to.') parser.add_argument( - 'bigtable_cluster', help='ID of the Cloud Bigtable cluster to ' - 'connect to.') + 'bigtable_cluster', + help='ID of the Cloud Bigtable cluster to connect to.') parser.add_argument( '--high_cpu_threshold', help='If Bigtable CPU usages is above this threshold, scale up', diff --git a/bigtable/autoscaler/autoscaler_test.py b/bigtable/autoscaler/autoscaler_test.py index 99d4e2c56cc..5c710746648 100644 --- a/bigtable/autoscaler/autoscaler_test.py +++ b/bigtable/autoscaler/autoscaler_test.py @@ -43,7 +43,7 @@ def test_scale_bigtable(): cluster.reload() original_node_count = cluster.serve_nodes - scale_bigtable(BIGTABLE_INSTANCE, True) + scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) time.sleep(3) cluster.reload() @@ -68,17 +68,22 @@ def test_main(scale_bigtable, get_cpu_load, sleep): LONG_SLEEP = 10 get_cpu_load.return_value = 0.5 - main(BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, LONG_SLEEP) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, + LONG_SLEEP) scale_bigtable.assert_not_called() scale_bigtable.reset_mock() get_cpu_load.return_value = 0.7 - main(BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, LONG_SLEEP) - scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, True) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, + BIGTABLE_INSTANCE, True) scale_bigtable.reset_mock() get_cpu_load.return_value = 0.2 - main(BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, LONG_SLEEP) - scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, False) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, + BIGTABLE_INSTANCE, False) scale_bigtable.reset_mock() diff --git a/bigtable/autoscaler/requirements.txt b/bigtable/autoscaler/requirements.txt index 9f91a227496..3f7710cd49d 100644 --- a/bigtable/autoscaler/requirements.txt +++ b/bigtable/autoscaler/requirements.txt @@ -1,3 +1,2 @@ -google-cloud-core==0.24.1 google-cloud-bigtable==0.23.1 google-cloud-monitoring==0.24.0 From 2b4f39d3c6bfcb4dd025360b755faf951a8c868d Mon Sep 17 00:00:00 2001 From: Bill Prin Date: Mon, 15 May 2017 18:43:35 -0700 Subject: [PATCH 12/12] fix reqs --- bigtable/autoscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bigtable/autoscaler/requirements.txt b/bigtable/autoscaler/requirements.txt index 3f7710cd49d..0164caf3a66 100644 --- a/bigtable/autoscaler/requirements.txt +++ b/bigtable/autoscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==0.23.1 +google-cloud-bigtable==0.24.0 google-cloud-monitoring==0.24.0
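As of the last patch in this series, ``autoscaler.py`` takes two positional arguments, the Cloud Bigtable instance ID and cluster ID, in addition to the optional threshold and sleep flags. A hypothetical invocation with the default settings would be ``python autoscaler.py my-instance my-cluster``, where both IDs are placeholders for real resources. Note that the ``--help`` output reproduced in the README above was captured before the cluster argument was introduced, so it lists only the instance ID positional argument.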
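The docstrings in ``strategies.py`` describe the module's extension point: every entry in ``UPSCALE_STRATEGIES`` and ``DOWNSCALE_STRATEGIES`` is a function that accepts the current node count and returns the node count to scale to. The minimal sketch below registers one additional upscale strategy under those assumptions; the ``strategies_custom`` module name, the ``proportional`` key, and the growth factor are illustrative only, and because ``autoscaler.py`` hard-codes the ``'incremental'`` key, selecting a different strategy would also require a small change there.

.. code-block:: python

    # strategies_custom.py -- illustrative sketch, not part of the sample.
    from strategies import UPSCALE_STRATEGIES


    def _upscale_proportional_strategy(current_nodes):
        # Grow the cluster by roughly 50%, always adding at least one node.
        return current_nodes + max(1, current_nodes // 2)


    # Register the new strategy; autoscaler.py would need to look up
    # 'proportional' instead of the hard-coded 'incremental' to use it.
    UPSCALE_STRATEGIES['proportional'] = _upscale_proportional_strategy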