Update Bigtable Programmatic Scaling Example #1003

Merged · 2 commits · Jun 27, 2017

bigtable/autoscaler/strategies.py (0 additions, 51 deletions)

This file was deleted.

bigtable/autoscaler/strategies_test.py (0 additions, 30 deletions)

This file was deleted.

@@ -74,7 +74,7 @@ Install Dependencies
Samples
-------------------------------------------------------------------------------

-Autoscaling example
+Metricscaling example
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++


@@ -83,12 +83,12 @@ To run this sample:

.. code-block:: bash

-$ python autoscaler.py
+$ python metricscaler.py

-usage: autoscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
-[--low_cpu_threshold LOW_CPU_THRESHOLD]
-[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
-bigtable_instance bigtable_cluster
+usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
+[--low_cpu_threshold LOW_CPU_THRESHOLD]
+[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
+bigtable_instance bigtable_cluster

Scales Cloud Bigtable clusters based on CPU usage.

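For context, the CLI shown above maps onto the sample's main entry point. A minimal sketch of driving it from Python instead of the shell, assuming metricscaler.main accepts parameters matching the flags; the instance, cluster, and threshold values below are placeholders, not taken from this PR:

# Hypothetical programmatic use; parameter names mirror the CLI flags above.
from metricscaler import main

main(
    bigtable_instance='my-instance',  # placeholder Cloud Bigtable instance ID
    bigtable_cluster='my-cluster',    # placeholder Cloud Bigtable cluster ID
    high_cpu_threshold=0.6,           # scale up above 60% CPU
    low_cpu_threshold=0.2,            # scale down below 20% CPU
    short_sleep=60,                   # seconds to wait when no action is taken
    long_sleep=600)                   # seconds to wait after changing the cluster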
@@ -20,8 +20,8 @@ setup:
- install_deps

samples:
-- name: Autoscaling example
-file: autoscaler.py
+- name: Metricscaling example
+file: metricscaler.py
show_help: true

cloud_client_library: true
@@ -21,7 +21,6 @@
from google.cloud import bigtable
from google.cloud import monitoring

-import strategies


def get_cpu_load():
@@ -52,6 +51,23 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
bigtable_cluster (str): Cloud Bigtable cluster ID to scale
scale_up (bool): If true, scale up, otherwise scale down
"""
+_MIN_NODE_COUNT = 3
+"""
+The minimum number of nodes to use. The default minimum is 3. If you have a
+lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
+clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
+metric is useful in figuring out the minimum number of nodes.
+"""
+
+_MAX_NODE_COUNT = 30
+"""
+The maximum number of nodes to use. The default maximum is 30 nodes per zone.
+If you need more quota, you can request more by following the instructions
+<a href="https://cloud.google.com/bigtable/quota">here</a>.
+"""
+
+_SIZE_CHANGE_STEP = 3
+"""The number of nodes to change the cluster by."""
# [START bigtable_scale]
bigtable_client = bigtable.Client(admin=True)
instance = bigtable_client.instance(bigtable_instance)
@@ -62,21 +78,21 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):

current_node_count = cluster.serve_nodes

-if current_node_count <= 3 and not scale_up:
-# Can't downscale lower than 3 nodes
-return

if scale_up:
-strategies_dict = strategies.UPSCALE_STRATEGIES
+if current_node_count < _MAX_NODE_COUNT:
+new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
+cluster.serve_nodes = new_node_count
+cluster.update()
+print('Scaled up from {} to {} nodes.'.format(
+current_node_count, new_node_count))
else:
-strategies_dict = strategies.DOWNSCALE_STRATEGIES

-strategy = strategies_dict['incremental']
-new_node_count = strategy(cluster.serve_nodes)
-cluster.serve_nodes = new_node_count
-cluster.update()
-print('Scaled from {} up to {} nodes.'.format(
-current_node_count, new_node_count))
+if current_node_count > _MIN_NODE_COUNT:
+new_node_count = max(
+current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
+cluster.serve_nodes = new_node_count
+cluster.update()
+print('Scaled down from {} to {} nodes.'.format(
+current_node_count, new_node_count))
# [END bigtable_scale]
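In short, the rewritten scale_bigtable clamps the cluster between _MIN_NODE_COUNT and _MAX_NODE_COUNT and moves in fixed steps. A self-contained sketch of that clamping rule follows; the helper below is illustrative only and is not part of the sample:

def _clamped_node_count(current, scale_up, step=3, min_nodes=3, max_nodes=30):
    # Mirrors the logic added above: step toward the bound, never past it,
    # and leave the count unchanged once the bound has been reached.
    if scale_up:
        if current < max_nodes:
            return min(current + step, max_nodes)
    else:
        if current > min_nodes:
            return max(current - step, min_nodes)
    return current  # already at the relevant bound; no change


assert _clamped_node_count(29, scale_up=True) == 30
assert _clamped_node_count(4, scale_up=False) == 3
assert _clamped_node_count(3, scale_up=False) == 3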


@@ -104,7 +120,7 @@ def main(
time.sleep(long_sleep)
elif cluster_cpu < low_cpu_threshold:
scale_bigtable(bigtable_instance, bigtable_cluster, False)
-time.sleep(short_sleep)
+time.sleep(long_sleep)
else:
print('CPU within threshold, sleeping.')
time.sleep(short_sleep)
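Only the tail of main()'s threshold check is visible in the hunk above. Pieced together from the visible branches, the decision it makes on each pass looks roughly like the sketch below; this is a reconstruction under that assumption, not the sample's verbatim code, and the default values are placeholders:

import time

from metricscaler import get_cpu_load, scale_bigtable


def check_and_scale(bigtable_instance, bigtable_cluster,
                    high_cpu_threshold=0.6, low_cpu_threshold=0.2,
                    short_sleep=60, long_sleep=600):
    # One pass of the CPU check: scale if outside the band, then sleep.
    cluster_cpu = get_cpu_load()
    if cluster_cpu > high_cpu_threshold:
        # Hot cluster: add nodes, then wait the long interval so the
        # CPU metric has time to reflect the larger cluster.
        scale_bigtable(bigtable_instance, bigtable_cluster, True)
        time.sleep(long_sleep)
    elif cluster_cpu < low_cpu_threshold:
        # Idle cluster: remove nodes. After this PR the long interval is
        # used here too, so the scaler does not immediately shrink again.
        scale_bigtable(bigtable_instance, bigtable_cluster, False)
        time.sleep(long_sleep)
    else:
        print('CPU within threshold, sleeping.')
        time.sleep(short_sleep)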
@@ -12,17 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit and system tests for autoscaler.py"""
"""Unit and system tests for metricscaler.py"""

import os
import time

from google.cloud import bigtable
from mock import patch

-from autoscaler import get_cpu_load
-from autoscaler import main
-from autoscaler import scale_bigtable
+from metricscaler import _SIZE_CHANGE_STEP
+from metricscaler import get_cpu_load
+from metricscaler import main
+from metricscaler import scale_bigtable

# tests assume instance and cluster have the same ID
BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER']
@@ -49,7 +50,7 @@ def test_scale_bigtable():
cluster.reload()

new_node_count = cluster.serve_nodes
-assert (new_node_count == (original_node_count + 2))
+assert (new_node_count == (original_node_count + _SIZE_CHANGE_STEP))

scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
time.sleep(3)
@@ -59,10 +60,9 @@


# Unit test for logic

@patch('time.sleep')
-@patch('autoscaler.get_cpu_load')
-@patch('autoscaler.scale_bigtable')
+@patch('metricscaler.get_cpu_load')
+@patch('metricscaler.scale_bigtable')
def test_main(scale_bigtable, get_cpu_load, sleep):
SHORT_SLEEP = 5
LONG_SLEEP = 10
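The body of test_main is cut off above. The pattern it relies on, patching the pieces that touch the network and asserting on how scale_bigtable gets called, can be sketched as follows; the arguments and the assertion are assumptions, since the rest of the test is not shown in this diff:

from mock import patch

from metricscaler import main


@patch('time.sleep')
@patch('metricscaler.get_cpu_load')
@patch('metricscaler.scale_bigtable')
def test_scales_up_when_cpu_is_high(scale_bigtable, get_cpu_load, sleep):
    # Simulated metric: CPU above the high threshold passed to main() below.
    get_cpu_load.return_value = 0.7
    # Hypothetical call; the real test's IDs and thresholds are not visible here.
    main('test-instance', 'test-cluster', 0.6, 0.2, 5, 10)
    # The scaler should have been asked to scale up exactly once.
    scale_bigtable.assert_called_once_with('test-instance', 'test-cluster', True)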