Update Bigtable Programmatic Scaling Example (#1003) · jaytoday/python-docs-samples@bc0924a · GitHub
[go: up one dir, main page]

Skip to content

Commit bc0924a

Browse files
authored
Update Bigtable Programmatic Scaling Example (GoogleCloudPlatform#1003)
* Update Bigtable Programmatic Scaling Example * Rename "autoscaling" to "metricscaler" and use the term "programmatic scaling" * Remove `strategies.py` to simplify example * Fix wrong sleep length bug * Add maximum node count * hegemonic review
1 parent 57fbe30 commit bc0924a

File tree

7 files changed

+47
-112
lines changed

7 files changed

+47
-112
lines changed

bigtable/autoscaler/strategies.py

Lines changed: 0 additions & 51 deletions
This file was deleted.

bigtable/autoscaler/strategies_test.py

Lines changed: 0 additions & 30 deletions
This file was deleted.

bigtable/autoscaler/README.rst renamed to bigtable/metricscaler/README.rst

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ Install Dependencies
7474
Samples
7575
-------------------------------------------------------------------------------
7676

77-
Autoscaling example
77+
Metricscaling example
7878
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
7979

8080

@@ -83,12 +83,12 @@ To run this sample:
8383

8484
.. code-block:: bash
8585
86-
$ python autoscaler.py
86+
$ python metricscaler.py
8787
88-
usage: autoscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
89-
[--low_cpu_threshold LOW_CPU_THRESHOLD]
90-
[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
91-
bigtable_instance bigtable_cluster
88+
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
89+
[--low_cpu_threshold LOW_CPU_THRESHOLD]
90+
[--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
91+
bigtable_instance bigtable_cluster
9292
9393
Scales Cloud Bigtable clusters based on CPU usage.
9494

bigtable/autoscaler/README.rst.in renamed to bigtable/metricscaler/README.rst.in

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@ setup:
2020
- install_deps
2121

2222
samples:
23-
- name: Autoscaling example
24-
file: autoscaler.py
23+
- name: Metricscaling example
24+
file: metricscaler.py
2525
show_help: true
2626

2727
cloud_client_library: true

bigtable/autoscaler/autoscaler.py renamed to bigtable/metricscaler/metricscaler.py

Lines changed: 31 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
from google.cloud import bigtable
2222
from google.cloud import monitoring
2323

24-
import strategies
2524

2625

2726
def get_cpu_load():
@@ -52,6 +51,23 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
5251
bigtable_cluster (str): Cloud Bigtable cluster ID to scale
5352
scale_up (bool): If true, scale up, otherwise scale down
5453
"""
54+
_MIN_NODE_COUNT = 3
55+
"""
56+
The minimum number of nodes to use. The default minimum is 3. If you have a
57+
lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
58+
clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
59+
metric is useful in figuring out the minimum number of nodes.
60+
"""
61+
62+
_MAX_NODE_COUNT = 30
63+
"""
64+
The maximum number of nodes to use. The default maximum is 30 nodes per zone.
65+
If you need more quota, you can request more by following the instructions
66+
<a href="https://cloud.google.com/bigtable/quota">here</a>.
67+
"""
68+
69+
_SIZE_CHANGE_STEP = 3
70+
"""The number of nodes to change the cluster by."""
5571
# [START bigtable_scale]
5672
bigtable_client = bigtable.Client(admin=True)
5773
instance = bigtable_client.instance(bigtable_instance)
@@ -62,21 +78,21 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
6278

6379
current_node_count = cluster.serve_nodes
6480

65-
if current_node_count <= 3 and not scale_up:
66-
# Can't downscale lower than 3 nodes
67-
return
68-
6981
if scale_up:
70-
strategies_dict = strategies.UPSCALE_STRATEGIES
82+
if current_node_count < _MAX_NODE_COUNT:
83+
new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
84+
cluster.serve_nodes = new_node_count
85+
cluster.update()
86+
print('Scaled up from {} to {} nodes.'.format(
87+
current_node_count, new_node_count))
7188
else:
72-
strategies_dict = strategies.DOWNSCALE_STRATEGIES
73-
74-
strategy = strategies_dict['incremental']
75-
new_node_count = strategy(cluster.serve_nodes)
76-
cluster.serve_nodes = new_node_count
77-
cluster.update()
78-
print('Scaled from {} up to {} nodes.'.format(
79-
current_node_count, new_node_count))
89+
if current_node_count > _MIN_NODE_COUNT:
90+
new_node_count = max(
91+
current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
92+
cluster.serve_nodes = new_node_count
93+
cluster.update()
94+
print('Scaled down from {} to {} nodes.'.format(
95+
current_node_count, new_node_count))
8096
# [END bigtable_scale]
8197

8298

@@ -104,7 +120,7 @@ def main(
104120
time.sleep(long_sleep)
105121
elif cluster_cpu < low_cpu_threshold:
106122
scale_bigtable(bigtable_instance, bigtable_cluster, False)
107-
time.sleep(short_sleep)
123+
time.sleep(long_sleep)
108124
else:
109125
print('CPU within threshold, sleeping.')
110126
time.sleep(short_sleep)

bigtable/autoscaler/autoscaler_test.py renamed to bigtable/metricscaler/metricscaler_test.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,17 +12,18 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15-
"""Unit and system tests for autoscaler.py"""
15+
"""Unit and system tests for metricscaler.py"""
1616

1717
import os
1818
import time
1919

2020
from google.cloud import bigtable
2121
from mock import patch
2222

23-
from autoscaler import get_cpu_load
24-
from autoscaler import main
25-
from autoscaler import scale_bigtable
23+
from metricscaler import _SIZE_CHANGE_STEP
24+
from metricscaler import get_cpu_load
25+
from metricscaler import main
26+
from metricscaler import scale_bigtable
2627

2728
# tests assume instance and cluster have the same ID
2829
BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER']
@@ -49,7 +50,7 @@ def test_scale_bigtable():
4950
cluster.reload()
5051

5152
new_node_count = cluster.serve_nodes
52-
assert (new_node_count == (original_node_count + 2))
53+
assert (new_node_count == (original_node_count + _SIZE_CHANGE_STEP))
5354

5455
scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
5556
time.sleep(3)
@@ -59,10 +60,9 @@ def test_scale_bigtable():
5960

6061

6162
# Unit test for logic
62-
6363
@patch('time.sleep')
64-
@patch('autoscaler.get_cpu_load')
65-
@patch('autoscaler.scale_bigtable')
64+
@patch('metricscaler.get_cpu_load')
65+
@patch('metricscaler.scale_bigtable')
6666
def test_main(scale_bigtable, get_cpu_load, sleep):
6767
SHORT_SLEEP = 5
6868
LONG_SLEEP = 10

0 commit comments

Comments (0)