Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit 936e77f

Browse files
authored
Merge branch 'master' into patch-15
2 parents 2e1afda + 3619a77 commit 936e77f
Copy full SHA for 936e77f

File tree

Expand file tree / Collapse file tree

13 files changed

+361
-81
lines changed
Filter options
Expand file tree / Collapse file tree

13 files changed

+361
-81
lines changed

‎appengine/flexible/tasks/Dockerfile

Copy file name to clipboard
+17Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
# Use the official Python image.
# https://hub.docker.com/_/python
FROM python:3.7

# Copy local code to the container image.
ENV APP_HOME /app
WORKDIR $APP_HOME
COPY . .

# Install production dependencies.
# NOTE(review): versions are unpinned here; the app's requirements.txt pins
# Flask/gunicorn — confirm whether this image should install from it instead.
RUN pip install Flask gunicorn

# Run the web service on container startup. Here we use the gunicorn
# webserver, with one worker process and 8 threads.
# For environments with multiple CPU cores, increase the number of workers
# to be equal to the cores available.
# $PORT is supplied by the runtime environment at container start.
CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 main:app

‎appengine/flexible/tasks/README.md

Copy file name to clipboardExpand all lines: appengine/flexible/tasks/README.md
-21Lines changed: 0 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -101,24 +101,3 @@ endpoint, with a payload specified:
101101
```
102102
python create_app_engine_queue_task.py --project=$PROJECT_ID --queue=$QUEUE_ID --location=$LOCATION_ID --payload=hello
103103
```
104-
105-
### Using HTTP Push Queues
106-
107-
Set an environment variable for the endpoint to your task handler. This is an
108-
example url to send requests to the App Engine task handler:
109-
```
110-
export URL=https://<project_id>.appspot.com/example_task_handler
111-
```
112-
113-
Running the sample will create a task and send the task to the specific URL
114-
endpoint, with a payload specified:
115-
116-
```
117-
python create_http_task.py --project=$PROJECT_ID --queue=$QUEUE_ID --location=$LOCATION_ID --url=$URL --payload=hello
118-
```
119-
120-
Now view that the payload was received and verify the payload:
121-
122-
```
123-
gcloud app logs read
124-
```
+1-1Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
Flask==1.0.2
22
gunicorn==19.9.0
3-
google-cloud-tasks==0.6.0
3+
google-cloud-tasks==0.7.0

‎bigtable/hello/main.py

Copy file name to clipboardExpand all lines: bigtable/hello/main.py
+16-16Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -25,24 +25,24 @@
2525
"""
2626

2727
import argparse
28-
# [START dependencies]
28+
# [START bigtable_hw_imports]
2929
import datetime
3030

3131
from google.cloud import bigtable
3232
from google.cloud.bigtable import column_family
3333
from google.cloud.bigtable import row_filters
34-
# [END dependencies]
34+
# [END bigtable_hw_imports]
3535

3636

3737
def main(project_id, instance_id, table_id):
38-
# [START connecting_to_bigtable]
38+
# [START bigtable_hw_connect]
3939
# The client must be created with admin=True because it will create a
4040
# table.
4141
client = bigtable.Client(project=project_id, admin=True)
4242
instance = client.instance(instance_id)
43-
# [END connecting_to_bigtable]
43+
# [END bigtable_hw_connect]
4444

45-
# [START creating_a_table]
45+
# [START bigtable_hw_create_table]
4646
print('Creating the {} table.'.format(table_id))
4747
table = instance.table(table_id)
4848

@@ -56,9 +56,9 @@ def main(project_id, instance_id, table_id):
5656
table.create(column_families=column_families)
5757
else:
5858
print("Table {} already exists.".format(table_id))
59-
# [END creating_a_table]
59+
# [END bigtable_hw_create_table]
6060

61-
# [START writing_rows]
61+
# [START bigtable_hw_write_rows]
6262
print('Writing some greetings to the table.')
6363
greetings = ['Hello World!', 'Hello Cloud Bigtable!', 'Hello Python!']
6464
rows = []
@@ -82,36 +82,36 @@ def main(project_id, instance_id, table_id):
8282
timestamp=datetime.datetime.utcnow())
8383
rows.append(row)
8484
table.mutate_rows(rows)
85-
# [END writing_rows]
85+
# [END bigtable_hw_write_rows]
8686

87-
# [START creating_a_filter]
87+
# [START bigtable_hw_create_filter]
8888
# Create a filter to only retrieve the most recent version of the cell
8989
# for each column accross entire row.
9090
row_filter = row_filters.CellsColumnLimitFilter(1)
91-
# [END creating_a_filter]
91+
# [END bigtable_hw_create_filter]
9292

93-
# [START getting_a_row]
93+
# [START bigtable_hw_get_with_filter]
9494
print('Getting a single greeting by row key.')
9595
key = 'greeting0'.encode()
9696

9797
row = table.read_row(key, row_filter)
9898
cell = row.cells[column_family_id][column][0]
9999
print(cell.value.decode('utf-8'))
100-
# [END getting_a_row]
100+
# [END bigtable_hw_get_with_filter]
101101

102-
# [START scanning_all_rows]
102+
# [START bigtable_hw_scan_with_filter]
103103
print('Scanning for all greetings:')
104104
partial_rows = table.read_rows(filter_=row_filter)
105105

106106
for row in partial_rows:
107107
cell = row.cells[column_family_id][column][0]
108108
print(cell.value.decode('utf-8'))
109-
# [END scanning_all_rows]
109+
# [END bigtable_hw_scan_with_filter]
110110

111-
# [START deleting_a_table]
111+
# [START bigtable_hw_delete_table]
112112
print('Deleting the {} table.'.format(table_id))
113113
table.delete()
114-
# [END deleting_a_table]
114+
# [END bigtable_hw_delete_table]
115115

116116

117117
if __name__ == '__main__':

‎bigtable/hello_happybase/main.py

Copy file name to clipboardExpand all lines: bigtable/hello_happybase/main.py
+14-13Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -25,32 +25,33 @@
2525
"""
2626

2727
import argparse
28-
28+
# [START bigtable_hw_imports_happybase]
2929
from google.cloud import bigtable
3030
from google.cloud import happybase
31+
# [END bigtable_hw_imports_happybase]
3132

3233

3334
def main(project_id, instance_id, table_name):
34-
# [START connecting_to_bigtable]
35+
# [START bigtable_hw_connect_happybase]
3536
# The client must be created with admin=True because it will create a
3637
# table.
3738
client = bigtable.Client(project=project_id, admin=True)
3839
instance = client.instance(instance_id)
3940
connection = happybase.Connection(instance=instance)
40-
# [END connecting_to_bigtable]
41+
# [END bigtable_hw_connect_happybase]
4142

4243
try:
43-
# [START creating_a_table]
44+
# [START bigtable_hw_create_table_happybase]
4445
print('Creating the {} table.'.format(table_name))
4546
column_family_name = 'cf1'
4647
connection.create_table(
4748
table_name,
4849
{
4950
column_family_name: dict() # Use default options.
5051
})
51-
# [END creating_a_table]
52+
# [END bigtable_hw_create_table_happybase]
5253

53-
# [START writing_rows]
54+
# [START bigtable_hw_write_rows_happybase]
5455
print('Writing some greetings to the table.')
5556
table = connection.table(table_name)
5657
column_name = '{fam}:greeting'.format(fam=column_family_name)
@@ -75,26 +76,26 @@ def main(project_id, instance_id, table_name):
7576
table.put(
7677
row_key, {column_name.encode('utf-8'): value.encode('utf-8')}
7778
)
78-
# [END writing_rows]
79+
# [END bigtable_hw_write_rows_happybase]
7980

80-
# [START getting_a_row]
81+
# [START bigtable_hw_get_by_key_happybase]
8182
print('Getting a single greeting by row key.')
8283
key = 'greeting0'.encode('utf-8')
8384
row = table.row(key)
8485
print('\t{}: {}'.format(key, row[column_name.encode('utf-8')]))
85-
# [END getting_a_row]
86+
# [END bigtable_hw_get_by_key_happybase]
8687

87-
# [START scanning_all_rows]
88+
# [START bigtable_hw_scan_all_happybase]
8889
print('Scanning for all greetings:')
8990

9091
for key, row in table.scan():
9192
print('\t{}: {}'.format(key, row[column_name.encode('utf-8')]))
92-
# [END scanning_all_rows]
93+
# [END bigtable_hw_scan_all_happybase]
9394

94-
# [START deleting_a_table]
95+
# [START bigtable_hw_delete_table_happybase]
9596
print('Deleting the {} table.'.format(table_name))
9697
connection.delete_table(table_name)
97-
# [END deleting_a_table]
98+
# [END bigtable_hw_delete_table_happybase]
9899

99100
finally:
100101
connection.close()

‎opencensus/README.md

Copy file name to clipboard
+35Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
<img src="https://avatars2.githubusercontent.com/u/38480854?v=3&s=96" alt="OpenCensus logo" title="OpenCensus" align="right" height="96" width="96"/>
2+
3+
# OpenCensus Stackdriver Metrics Sample
4+
5+
[OpenCensus](https://opencensus.io) is a toolkit for collecting application
6+
performance and behavior data. OpenCensus includes utilities for distributed
7+
tracing, metrics collection, and context propagation within and between
8+
services.
9+
10+
This example demonstrates using the OpenCensus client to send metrics data to
11+
the [Stackdriver Monitoring](https://cloud.google.com/monitoring/docs/)
12+
backend.
13+
14+
## Prerequisites
15+
16+
Install the OpenCensus core and Stackdriver exporter libraries:
17+
18+
```sh
19+
pip install -r opencensus/requirements.txt
20+
```
21+
22+
Make sure that your environment is configured to [authenticate with
23+
GCP](https://cloud.google.com/docs/authentication/getting-started).
24+
25+
## Running the example
26+
27+
```sh
28+
python opencensus/metrics_quickstart.py
29+
```
30+
31+
The example generates a histogram of simulated latencies, which is exported to
32+
Stackdriver after 60 seconds. After it's exported, the histogram will be
33+
visible on the [Stackdriver Metrics
34+
Explorer](https://app.google.stackdriver.com/metrics-explorer) page as
35+
`OpenCensus/task_latency_view`.

‎opencensus/metrics_quickstart.py

Copy file name to clipboard
+76Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
#!/usr/bin/env python
2+
3+
# Copyright 2019 Google Inc. All Rights Reserved.
4+
#
5+
# Licensed under the Apache License, Version 2.0 (the "License");
6+
# you may not use this file except in compliance with the License.
7+
# You may obtain a copy of the License at
8+
#
9+
# http://www.apache.org/licenses/LICENSE-2.0
10+
#
11+
# Unless required by applicable law or agreed to in writing, software
12+
# distributed under the License is distributed on an "AS IS" BASIS,
13+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14+
# See the License for the specific language governing permissions and
15+
# limitations under the License.
16+
17+
# [START monitoring_opencensus_metrics_quickstart]
18+
19+
from random import random
20+
import time
21+
22+
from opencensus.ext.stackdriver import stats_exporter
23+
from opencensus.stats import aggregation
24+
from opencensus.stats import measure
25+
from opencensus.stats import stats
26+
from opencensus.stats import view
27+
28+
29+
# A measure that represents task latency in ms.
30+
LATENCY_MS = measure.MeasureFloat(
31+
"task_latency",
32+
"The task latency in milliseconds",
33+
"ms")
34+
35+
# A view of the task latency measure that aggregates measurements according to
36+
# a histogram with predefined bucket boundaries. This aggregate is periodically
37+
# exported to Stackdriver Monitoring.
38+
LATENCY_VIEW = view.View(
39+
"task_latency_distribution",
40+
"The distribution of the task latencies",
41+
[],
42+
LATENCY_MS,
43+
# Latency in buckets: [>=0ms, >=100ms, >=200ms, >=400ms, >=1s, >=2s, >=4s]
44+
aggregation.DistributionAggregation(
45+
[100.0, 200.0, 400.0, 1000.0, 2000.0, 4000.0]))
46+
47+
48+
def main():
49+
# Register the view. Measurements are only aggregated and exported if
50+
# they're associated with a registered view.
51+
stats.stats.view_manager.register_view(LATENCY_VIEW)
52+
53+
# Create the Stackdriver stats exporter and start exporting metrics in the
54+
# background, once every 60 seconds by default.
55+
exporter = stats_exporter.new_stats_exporter()
56+
print('Exporting stats to project "{}"'
57+
.format(exporter.options.project_id))
58+
59+
# Record 100 fake latency values between 0 and 5 seconds.
60+
for num in range(100):
61+
ms = random() * 5 * 1000
62+
print("Latency {}: {}".format(num, ms))
63+
64+
mmap = stats.stats.stats_recorder.new_measurement_map()
65+
mmap.measure_float_put(LATENCY_MS, ms)
66+
mmap.record()
67+
68+
# Keep the thread alive long enough for the exporter to export at least
69+
# once.
70+
time.sleep(65)
71+
72+
73+
if __name__ == '__main__':
74+
main()
75+
76+
# [END monitoring_opencensus_metrics_quickstart]

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.