Commit 11cbbfd

Author: Jon Wayne Parrott (committed)

Merge pull request GoogleCloudPlatform#196 from GoogleCloudPlatform/pytest-bigquery

Moving bigquery samples to py.test

2 parents: cedb8a1 + 534fe10, commit 11cbbfd

8 files changed: +158 -171 lines
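
All eight diffs below apply the same migration: unittest-style CloudTest classes become plain py.test functions that take project settings from a cloud_config fixture, resolve test data files through a resource fixture, and capture printed output with pytest's built-in capsys fixture. The shared conftest.py that supplies cloud_config and resource is not part of this commit; the sketch below is only a hypothetical illustration of what such fixtures could look like, assuming the values come from environment variables and that resource files sit in a resources/ directory next to each test.

    # Hypothetical conftest.py sketch -- the repository's real fixtures may differ.
    import os

    import pytest


    class Config(object):
        """Holds the values the tests read, e.g. cloud_config.GCLOUD_PROJECT."""
        GCLOUD_PROJECT = os.environ.get('GCLOUD_PROJECT', 'your-project-id')
        CLOUD_STORAGE_BUCKET = os.environ.get('CLOUD_STORAGE_BUCKET', 'your-bucket')


    @pytest.fixture
    def cloud_config():
        # Every test function that names 'cloud_config' receives this object.
        return Config()


    @pytest.fixture
    def resource(request):
        # Resolve file names relative to the requesting test module's directory;
        # the 'resources' subdirectory is an assumption made for this sketch.
        def _resource(filename):
            return os.path.join(
                os.path.dirname(request.module.__file__), 'resources', filename)
        return _resource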

bigquery/api/async_query_test.py

13 additions, 17 deletions

@@ -14,25 +14,21 @@
 import json
 
 from async_query import main
-import testing
 
 
-class TestAsyncQuery(testing.CloudTest):
+def test_async_query(cloud_config, capsys):
+    query = (
+        'SELECT corpus FROM publicdata:samples.shakespeare '
+        'GROUP BY corpus;')
 
-    def test_async_query(self):
-        query = (
-            'SELECT corpus FROM publicdata:samples.shakespeare '
-            'GROUP BY corpus;')
+    main(
+        project_id=cloud_config.GCLOUD_PROJECT,
+        query_string=query,
+        batch=False,
+        num_retries=5,
+        interval=1)
 
-        with testing.capture_stdout() as stdout:
-            main(
-                project_id=self.config.GCLOUD_PROJECT,
-                query_string=query,
-                batch=False,
-                num_retries=5,
-                interval=1)
+    out, _ = capsys.readouterr()
+    value = out.strip().split('\n').pop()
 
-        value = stdout.getvalue().strip().split('\n').pop()
-
-        self.assertIsNotNone(
-            json.loads(value))
+    assert json.loads(value) is not None
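
Note (not part of the commit): capsys is pytest's built-in capture fixture and stands in for the old testing.capture_stdout() helper. A minimal, self-contained example of its behaviour:

    def test_capsys_example(capsys):
        print('hello')                   # stdout is captured by pytest
        out, err = capsys.readouterr()   # returns (stdout, stderr) captured so far
        assert out == 'hello\n'
        assert err == ''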

bigquery/api/export_data_to_cloud_storage_test.py

46 additions, 44 deletions

@@ -12,47 +12,49 @@
 # limitations under the License.
 
 from export_data_to_cloud_storage import main
-import pytest
-from testing import CloudTest
-
-
-@pytest.mark.slow
-class TestExportTableToGCS(CloudTest):
-    dataset_id = 'test_dataset'
-    table_id = 'test_table'
-
-    def test_export_table_csv(self):
-        cloud_storage_output_uri = \
-            'gs://{}/output.csv'.format(self.config.CLOUD_STORAGE_BUCKET)
-        main(
-            cloud_storage_output_uri,
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            num_retries=5,
-            interval=1,
-            export_format="CSV")
-
-    def test_export_table_json(self):
-        cloud_storage_output_uri = \
-            'gs://{}/output.json'.format(self.config.CLOUD_STORAGE_BUCKET)
-        main(
-            cloud_storage_output_uri,
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            num_retries=5,
-            interval=1,
-            export_format="NEWLINE_DELIMITED_JSON")
-
-    def test_export_table_avro(self):
-        cloud_storage_output_uri = \
-            'gs://{}/output.avro'.format(self.config.CLOUD_STORAGE_BUCKET)
-        main(
-            cloud_storage_output_uri,
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            num_retries=5,
-            interval=1,
-            export_format="AVRO")
+from testing import mark_flaky
+
+DATASET_ID = 'test_dataset'
+TABLE_ID = 'test_table'
+
+
+@mark_flaky
+def test_export_table_csv(cloud_config):
+    cloud_storage_output_uri = \
+        'gs://{}/output.csv'.format(cloud_config.CLOUD_STORAGE_BUCKET)
+    main(
+        cloud_storage_output_uri,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5,
+        interval=1,
+        export_format="CSV")
+
+
+@mark_flaky
+def test_export_table_json(cloud_config):
+    cloud_storage_output_uri = \
+        'gs://{}/output.json'.format(cloud_config.CLOUD_STORAGE_BUCKET)
+    main(
+        cloud_storage_output_uri,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5,
+        interval=1,
+        export_format="NEWLINE_DELIMITED_JSON")
+
+
+@mark_flaky
+def test_export_table_avro(cloud_config):
+    cloud_storage_output_uri = \
+        'gs://{}/output.avro'.format(cloud_config.CLOUD_STORAGE_BUCKET)
+    main(
+        cloud_storage_output_uri,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5,
+        interval=1,
+        export_format="AVRO")
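
Note (not part of the commit): mark_flaky comes from the repository's shared testing helpers and replaces the old @pytest.mark.slow marker, retrying these network-dependent tests instead; its definition is not shown in this diff. A plausible minimal sketch, assuming the third-party flaky package:

    # Hypothetical sketch of testing.mark_flaky -- the real helper may differ.
    from flaky import flaky

    # Re-run a failing test up to three times before reporting it as failed.
    mark_flaky = flaky(max_runs=3, min_passes=1)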

bigquery/api/getting_started_test.py

6 additions, 8 deletions

@@ -14,14 +14,12 @@
 import re
 
 from getting_started import main
-import testing
 
 
-class TestGettingStarted(testing.CloudTest):
-    def test_main(self):
-        with testing.capture_stdout() as mock_stdout:
-            main(self.config.GCLOUD_PROJECT)
+def test_main(cloud_config, capsys):
+    main(cloud_config.GCLOUD_PROJECT)
 
-        stdout = mock_stdout.getvalue()
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Query Results:.hamlet', re.DOTALL))
+    out, _ = capsys.readouterr()
+
+    assert re.search(re.compile(
+        r'Query Results:.hamlet', re.DOTALL), out)

bigquery/api/list_datasets_projects_test.py

7 additions, 11 deletions

@@ -14,18 +14,14 @@
 import re
 
 from list_datasets_projects import main
-import testing
 
 
-class TestListDatasetsProjects(testing.CloudTest):
+def test_main(cloud_config, capsys):
+    main(cloud_config.GCLOUD_PROJECT)
 
-    def test_main(self):
-        with testing.capture_stdout() as mock_stdout:
-            main(self.config.GCLOUD_PROJECT)
+    out, _ = capsys.readouterr()
 
-        stdout = mock_stdout.getvalue()
-
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Project list:.*bigquery#projectList.*projects', re.DOTALL))
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Dataset list:.*datasets.*datasetId', re.DOTALL))
+    assert re.search(re.compile(
+        r'Project list:.*bigquery#projectList.*projects', re.DOTALL), out)
+    assert re.search(re.compile(
+        r'Dataset list:.*datasets.*datasetId', re.DOTALL), out)

bigquery/api/load_data_by_post_test.py

42 additions, 41 deletions

@@ -14,44 +14,45 @@
 import re
 
 from load_data_by_post import load_data
-import pytest
-import testing
-
-
-@pytest.mark.slow
-class TestLoadDataByPost(testing.CloudTest):
-    dataset_id = 'ephemeral_test_dataset'
-    table_id = 'load_data_by_post'
-
-    def test_load_csv_data(self):
-        schema_path = self.resource_path('schema.json')
-        data_path = self.resource_path('data.csv')
-        with testing.capture_stdout() as mock_stdout:
-            load_data(schema_path,
-                      data_path,
-                      self.config.GCLOUD_PROJECT,
-                      self.dataset_id,
-                      self.table_id
-                      )
-
-        stdout = mock_stdout.getvalue()
-
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Waiting for job to finish.*Job complete.', re.DOTALL))
-
-    def test_load_json_data(self):
-        schema_path = self.resource_path('schema.json')
-        data_path = self.resource_path('data.json')
-
-        with testing.capture_stdout() as mock_stdout:
-            load_data(schema_path,
-                      data_path,
-                      self.config.GCLOUD_PROJECT,
-                      self.dataset_id,
-                      self.table_id
-                      )
-
-        stdout = mock_stdout.getvalue()
-
-        self.assertRegexpMatches(stdout, re.compile(
-            r'Waiting for job to finish.*Job complete.', re.DOTALL))
+from testing import mark_flaky
+
+DATASET_ID = 'ephemeral_test_dataset'
+TABLE_ID = 'load_data_by_post'
+
+
+@mark_flaky
+def test_load_csv_data(cloud_config, resource, capsys):
+    schema_path = resource('schema.json')
+    data_path = resource('data.csv')
+
+    load_data(
+        schema_path,
+        data_path,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID
+    )
+
+    out, _ = capsys.readouterr()
+
+    assert re.search(re.compile(
+        r'Waiting for job to finish.*Job complete.', re.DOTALL), out)
+
+
+@mark_flaky
+def test_load_json_data(cloud_config, resource, capsys):
+    schema_path = resource('schema.json')
+    data_path = resource('data.json')
+
+    load_data(
+        schema_path,
+        data_path,
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID
+    )
+
+    out, _ = capsys.readouterr()
+
+    assert re.search(re.compile(
+        r'Waiting for job to finish.*Job complete.', re.DOTALL), out)

bigquery/api/load_data_from_csv_test.py

17 additions, 18 deletions

@@ -13,25 +13,24 @@
 
 
 from load_data_from_csv import main
-import pytest
-from testing import CloudTest
+from testing import mark_flaky
 
+DATASET_ID = 'test_dataset'
+TABLE_ID = 'test_import_table'
 
-@pytest.mark.slow
-class TestLoadDataFromCSV(CloudTest):
-    dataset_id = 'test_dataset'
-    table_id = 'test_import_table'
 
-    def test_load_table(self):
-        cloud_storage_input_uri = 'gs://{}/data.csv'.format(
-            self.config.CLOUD_STORAGE_BUCKET)
-        schema_file = self.resource_path('schema.json')
+@mark_flaky
+def test_load_table(cloud_config, resource):
+    cloud_storage_input_uri = 'gs://{}/data.csv'.format(
+        cloud_config.CLOUD_STORAGE_BUCKET)
+    schema_file = resource('schema.json')
 
-        main(
-            self.config.GCLOUD_PROJECT,
-            self.dataset_id,
-            self.table_id,
-            schema_file=schema_file,
-            data_path=cloud_storage_input_uri,
-            poll_interval=1,
-            num_retries=5)
+    main(
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        schema_file=schema_file,
+        data_path=cloud_storage_input_uri,
+        poll_interval=1,
+        num_retries=5
+    )

bigquery/api/streaming_test.py

15 additions, 18 deletions

@@ -14,28 +14,25 @@
 import json
 
 import streaming
-from testing import capture_stdout, CloudTest
 
 
-class TestStreaming(CloudTest):
-    dataset_id = 'test_dataset'
-    table_id = 'test_table'
+DATASET_ID = 'test_dataset'
+TABLE_ID = 'test_table'
 
-    def test_stream_row_to_bigquery(self):
-        with open(
-                self.resource_path('streamrows.json'),
-                'r') as rows_file:
 
-            rows = json.load(rows_file)
+def test_stream_row_to_bigquery(cloud_config, resource, capsys):
+    with open(resource('streamrows.json'), 'r') as rows_file:
+        rows = json.load(rows_file)
 
-        streaming.get_rows = lambda: rows
+    streaming.get_rows = lambda: rows
 
-        with capture_stdout() as stdout:
-            streaming.main(
-                self.config.GCLOUD_PROJECT,
-                self.dataset_id,
-                self.table_id,
-                num_retries=5)
+    streaming.main(
+        cloud_config.GCLOUD_PROJECT,
+        DATASET_ID,
+        TABLE_ID,
+        num_retries=5)
 
-        results = stdout.getvalue().split('\n')
-        self.assertIsNotNone(json.loads(results[0]))
+    out, _ = capsys.readouterr()
+    results = out.split('\n')
+
+    assert json.loads(results[0]) is not None

bigquery/api/sync_query_test.py

12 additions, 14 deletions

@@ -14,22 +14,20 @@
 import json
 
 from sync_query import main
-from testing import capture_stdout, CloudTest
 
 
-class TestSyncQuery(CloudTest):
+def test_sync_query(cloud_config, capsys):
+    query = (
+        'SELECT corpus FROM publicdata:samples.shakespeare '
+        'GROUP BY corpus;')
 
-    def test_sync_query(self):
-        query = (
-            'SELECT corpus FROM publicdata:samples.shakespeare '
-            'GROUP BY corpus;')
+    main(
+        project_id=cloud_config.GCLOUD_PROJECT,
+        query=query,
+        timeout=30,
+        num_retries=5)
 
-        with capture_stdout() as stdout:
-            main(
-                project_id=self.config.GCLOUD_PROJECT,
-                query=query,
-                timeout=30,
-                num_retries=5)
+    out, _ = capsys.readouterr()
+    result = out.split('\n')[0]
 
-        result = stdout.getvalue().split('\n')[0]
-        self.assertIsNotNone(json.loads(result))
+    assert json.loads(result) is not None

0 commit comments
